Columns:

  query            string    lengths 7 – 9.55k
  document         string    lengths 10 – 363k
  metadata         dict
  negatives        sequence  lengths 0 – 101
  negative_scores  sequence  lengths 0 – 101
  document_score   string    lengths 3 – 10
  document_rank    string    102 distinct values
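Each record below prints its fields in schema order: query, document, metadata, negatives, negative_scores, document_score, document_rank; long string and sequence values are soft-wrapped. As a minimal sketch of how a split with this schema is typically loaded and inspected, assuming it is published as a Hugging Face dataset (the repository id below is a hypothetical placeholder, not the real path):

```python
from datasets import load_dataset

# Hypothetical repository id; substitute the dataset's actual path.
ds = load_dataset("org/code-search-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language description of the code
print(row["document"])             # the positive (matching) code snippet
print(len(row["negatives"]))       # up to 101 hard-negative snippets
print(row["negative_scores"][:5])  # one retrieval score per negative
print(row["document_score"], row["document_rank"])
```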
Use callbacks to share common setup or constraints between actions.
def set_order_detali @order_detali = OrderDetali.find(params[:id]) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_required_actions\n # TODO: check what fields change to asign required fields\n end", "def action_hook; end", "def run_actions; end", "def define_action_hook; end", "def actions; end", "def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end", "def add_actions; end", "def callbacks; end", "def callbacks; end", "def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end", "def define_action_helpers; end", "def post_setup\n end", "def action_methods; end", "def action_methods; end", "def action_methods; end", "def before_setup; end", "def action_run\n end", "def execute(setup)\n @action.call(setup)\n end", "def define_action_helpers?; end", "def set_actions\n actions :all\n end", "def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end", "def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end", "def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end", "def before_actions(*logic)\n self.before_actions = logic\n end", "def setup_handler\n end", "def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end", "def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? 
}\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end", "def action; end", "def action; end", "def action; end", "def action; end", "def action; end", "def workflow\n end", "def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end", "def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end", "def before(action)\n invoke_callbacks *self.class.send(action).before\n end", "def process_action(...)\n send_action(...)\n end", "def before_dispatch(env); end", "def after_actions(*logic)\n self.after_actions = logic\n end", "def setup\n # override and do something appropriate\n end", "def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end", "def setup(_context)\n end", "def setup(resources) ; end", "def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end", "def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end", "def determine_valid_action\n\n end", "def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end", "def startcompany(action)\n @done = true\n action.setup\n end", "def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end", "def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end", "def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end", "def define_tasks\n define_weave_task\n connect_common_tasks\n end", "def setup(&block)\n define_method(:setup, &block)\n end", "def setup\n transition_to(:setup)\n end", "def setup\n transition_to(:setup)\n end", "def action\n end", "def setup( *args 
)\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend", "def config(action, *args); end", "def setup\n @setup_proc.call(self) if @setup_proc\n end", "def before_action \n end", "def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end", "def action\n end", "def matt_custom_action_begin(label); end", "def setup\n # override this if needed\n end", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end", "def after(action)\n invoke_callbacks *options_for(action).after\n end", "def pre_task\n end", "def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end", "def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end", "def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end", "def setup_signals; end", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! 
: action\n end", "def initialize(*args)\n super\n @action = :set\nend", "def after_set_callback; end", "def setup\n #implement in subclass;\n end", "def lookup_action; end", "def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end", "def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end", "def release_actions; end", "def around_hooks; end", "def save_action; end", "def setup(easy)\n super\n easy.customrequest = @verb\n end", "def action_target()\n \n end", "def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end", "def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end", "def before_setup\n # do nothing by default\n end", "def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end", "def default_action; end", "def setup(&blk)\n @setup_block = blk\n end", "def callback_phase\n super\n end", "def advice\n end", "def _handle_action_missing(*args); end", "def duas1(action)\n action.call\n action.call\nend", "def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end", "def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end", "def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend" ]
[ "0.6163163", "0.6045976", "0.5946146", "0.591683", "0.5890051", "0.58349305", "0.5776858", "0.5703237", "0.5703237", "0.5652805", "0.5621621", "0.54210985", "0.5411113", "0.5411113", "0.5411113", "0.5391541", "0.53794575", "0.5357573", "0.53402257", "0.53394014", "0.53321576", "0.53124547", "0.529654", "0.5296262", "0.52952296", "0.52600986", "0.52442724", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.5232394", "0.523231", "0.5227454", "0.52226824", "0.52201617", "0.5212327", "0.52079266", "0.52050185", "0.51754695", "0.51726824", "0.51710224", "0.5166172", "0.5159343", "0.51578903", "0.51522785", "0.5152022", "0.51518047", "0.51456624", "0.51398855", "0.5133759", "0.5112076", "0.5111866", "0.5111866", "0.5110294", "0.5106169", "0.509231", "0.50873137", "0.5081088", "0.508059", "0.50677156", "0.50562143", "0.5050554", "0.50474834", "0.50474834", "0.5036181", "0.5026331", "0.5022976", "0.5015441", "0.50121695", "0.5000944", "0.5000019", "0.4996878", "0.4989888", "0.4989888", "0.49864885", "0.49797225", "0.49785787", "0.4976161", "0.49683493", "0.4965126", "0.4958034", "0.49559742", "0.4954353", "0.49535993", "0.4952725", "0.49467874", "0.49423352", "0.49325448", "0.49282882", "0.49269363", "0.49269104", "0.49252945", "0.4923091", "0.49194667", "0.49174926", "0.49173003", "0.49171105", "0.4915879", "0.49155936" ]
0.0
-1
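The metadata line marks this row for a (query, document, negatives) triplet objective, and negative_scores holds one similarity score per negative, in descending order; the trailing document_score of 0.0 and document_rank of -1 appear to flag a positive document that did not show up among the ranked retrieval results. Below is a minimal sketch of the kind of loss such a triplet feeds, assuming cosine similarity over normalized embeddings and an InfoNCE-style formulation; the encoder itself is a stand-in, since the dataset does not specify one:

```python
import torch
import torch.nn.functional as F

def triplet_infonce_loss(query_emb, doc_emb, neg_embs, temperature=0.05):
    """InfoNCE over one positive document and n hard negatives.

    query_emb: (d,), doc_emb: (d,), neg_embs: (n, d); all produced by a
    stand-in encoder. Mirrors the (query, document, negatives) objective
    declared in the row metadata.
    """
    query_emb = F.normalize(query_emb, dim=-1)
    candidates = F.normalize(torch.cat([doc_emb.unsqueeze(0), neg_embs]), dim=-1)
    logits = candidates @ query_emb / temperature  # shape (1 + n,)
    target = torch.zeros(1, dtype=torch.long)      # positive sits at index 0
    return F.cross_entropy(logits.unsqueeze(0), target)
```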
Never trust parameters from the scary internet, only allow the white list through.
def order_detali_params params.require(:order_detali).permit(:order_id, :book_id, :price) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_params\n params.require(:user).permit(param_whitelist)\n end", "def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end", "def allow_params_authentication!; end", "def allowed_params\n ALLOWED_PARAMS\n end", "def default_param_whitelist\n [\"mode\"]\n end", "def param_whitelist\n [:role, :title]\n end", "def expected_permitted_parameter_names; end", "def safe_params\n params.except(:host, :port, :protocol).permit!\n end", "def strong_params\n params.require(:team_member).permit(param_whitelist)\n end", "def permitir_parametros\n \t\tparams.permit!\n \tend", "def strong_params\n params.require(:community).permit(param_whitelist)\n end", "def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end", "def strong_params\n params.require(:education).permit(param_whitelist)\n end", "def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end", "def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end", "def param_whitelist\n [:rating, :review]\n end", "def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end", "def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end", "def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end", "def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end", "def valid_params_request?; end", "def strong_params\n params.require(:experience).permit(param_whitelist)\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? 
key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end", "def allowed_params\n params.require(:allowed).permit(:email)\n end", "def permitted_params\n []\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end", "def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend", "def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end", "def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end", "def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end", "def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end", "def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end", "def safe_params\n params.require(:user).permit(:name)\n end", "def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend", "def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end", "def check_params; true; end", "def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end", "def quote_params\n params.permit!\n end", "def valid_params?; end", "def paramunold_params\n params.require(:paramunold).permit!\n end", "def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend", "def filtered_parameters; end", "def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end", "def filtering_params\n params.permit(:email, :name)\n end", "def check_params\n true\n end", "def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end", "def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def allowed_params\n 
params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend", "def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend", "def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end", "def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end", "def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end", "def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend", "def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end", "def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end", "def active_code_params\n params[:active_code].permit\n end", "def filtering_params\n params.permit(:email)\n end", "def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end", "def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end", "def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end", "def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end", "def post_params\n if current_user.admin? 
\n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end", "def list_params\n params.permit(:name)\n end", "def filter_parameters; end", "def filter_parameters; end", "def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end", "def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end", "def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end", "def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end", "def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end", "def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end", "def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end", "def url_whitelist; end", "def admin_social_network_params\n params.require(:social_network).permit!\n end", "def filter_params\n params.require(:filters).permit(:letters)\n end", "def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end", "def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end", "def sensitive_params=(params)\n @sensitive_params = params\n end", "def permit_request_params\n params.permit(:address)\n end", "def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end", "def secure_params\n params.require(:location).permit(:name)\n end", "def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end", "def question_params\n params.require(:survey_question).permit(question_whitelist)\n end", "def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end", "def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end", "def maintenance_request_params\n params[:maintenance_request].permit! 
#allow all parameters for now\n end", "def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end", "def url_params\n params[:url].permit(:full)\n end", "def backend_user_params\n params.permit!\n end", "def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend", "def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end", "def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end", "def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end", "def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end", "def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end", "def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end", "def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end", "def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end" ]
[ "0.69792545", "0.6781151", "0.67419964", "0.674013", "0.6734356", "0.6591046", "0.6502396", "0.6496313", "0.6480641", "0.6477825", "0.64565", "0.6438387", "0.63791263", "0.63740575", "0.6364131", "0.63192815", "0.62991166", "0.62978333", "0.6292148", "0.6290449", "0.6290076", "0.62894756", "0.6283177", "0.6242471", "0.62382483", "0.6217549", "0.6214457", "0.6209053", "0.6193042", "0.6177802", "0.6174604", "0.61714715", "0.6161512", "0.6151757", "0.6150663", "0.61461", "0.61213595", "0.611406", "0.6106206", "0.6105114", "0.6089039", "0.6081015", "0.6071004", "0.60620916", "0.6019971", "0.601788", "0.6011056", "0.6010898", "0.6005122", "0.6005122", "0.6001556", "0.6001049", "0.59943926", "0.5992201", "0.59909594", "0.5990628", "0.5980841", "0.59669393", "0.59589154", "0.5958826", "0.5957911", "0.5957385", "0.5953072", "0.59526145", "0.5943361", "0.59386164", "0.59375334", "0.59375334", "0.5933856", "0.59292704", "0.59254247", "0.5924164", "0.59167904", "0.59088355", "0.5907542", "0.59064597", "0.5906243", "0.5898226", "0.589687", "0.5896091", "0.5894501", "0.5894289", "0.5891739", "0.58860534", "0.5882406", "0.587974", "0.58738774", "0.5869024", "0.58679986", "0.5867561", "0.5865932", "0.5864461", "0.58639693", "0.58617616", "0.5861436", "0.5860451", "0.58602303", "0.5854586", "0.58537364", "0.5850427", "0.5850199" ]
0.0
-1
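Here again the negatives are strong-parameter variants of the query's theme, and the negative_scores array runs parallel to them in descending order. A hedged sketch of how such scores are commonly produced with a bi-encoder follows; the model name is an assumption, not necessarily the encoder used for this dataset:

```python
from sentence_transformers import SentenceTransformer, util

# Stand-in model; the dump does not state which encoder scored the negatives.
model = SentenceTransformer("all-MiniLM-L6-v2")

query = ("Never trust parameters from the scary internet, "
         "only allow the white list through.")
negatives = [
    "def strong_params\n  params.require(:user).permit(param_whitelist)\nend",
]

q_emb = model.encode(query, convert_to_tensor=True)
n_embs = model.encode(negatives, convert_to_tensor=True)
scores = util.cos_sim(q_emb, n_embs)[0]  # one cosine score per negative
print(sorted(scores.tolist(), reverse=True))
```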
Alter self.development with contents, and optionally save.
def apply!(options={}) should_ignore_conflict = options.fetch(:ignore_conflict, false) should_save = options.fetch(:save, true) if !should_ignore_conflict return false unless applyable? end apply(options) if should_save # Maybe too much responsibility? # Or, this should be a transaction. Feels bloated. development.save if self.save end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contents=(new_contents)\n debug(\"#{self.class.name}#contents= called, going to update containing model's #{attr_name}\")\n @contents = new_contents\n write\n end", "def edit\n require 'ruby-debug'; debugger\n if storage.respond_to?(\"json_file\")\n output \"#{cyan(\"Boom!\")} #{Platform.edit(storage.json_file)}\"\n else\n output \"This storage backend #{red storage.class} does not store #{cyan(\"Boom!\")} data on your computer\"\n end\n end", "def set_development\n @development = Development.find(params[:id])\n end", "def set_development_work\n @development_work = DevelopmentWork.find(params[:id])\n end", "def update\n @development = Development.find(params[:id])\n\n respond_to do |format|\n if @development.update_attributes(params[:development])\n format.html { redirect_to @development, notice: 'Development was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @development.errors, status: :unprocessable_entity }\n end\n end\n end", "def save\n return unless dirty? && config.respond_to?(:write)\n\n config.write\n clean_up\n end", "def modify_database_yml_as_needed!\n require 'yaml'\n\n db_yaml_file = File.join('config', 'database.yml')\n db_yaml = YAML.load_file(db_yaml_file)\n\n unless db_yaml[rails_env]\n notify(\"adding environment '#{rails_env}' to database.yml\") do\n test_content = db_yaml['test']\n raise \"No default database.yml entry for 'test'?!?\" unless test_content\n\n db_yaml[rails_env] = test_content.dup\n new_yaml = YAML.dump(db_yaml)\n # Get rid of the silly '---' line that YAML.dump puts at the start.\n new_yaml = new_yaml.split(\"\\n\").map { |l| l unless l =~ /^\\-+$/i }.compact.join(\"\\n\")\n File.open(db_yaml_file, 'w') { |f| f.puts new_yaml }\n end\n end\n end", "def save\n\t\t\tsuper\n\t\t\t@@env = self\n\t\tend", "def touch\n @saved = false\n end", "def touch_contents\n return unless respond_to?(:contents)\n\n contents.update_all(updated_at: Time.current)\n end", "def update\n tmp_file = Rails.root.join('tmp', 'deployment-rc.yml')\n self.to_file tmp_file\n self.class.replace tmp_file\n end", "def set_content\n unless compare_content\n description = []\n description << \"update content in file #{@new_resource.path} from #{short_cksum(@current_resource.checksum)} to #{short_cksum(new_resource_content_checksum)}\"\n description << diff_current_from_content(@new_resource.content) \n converge_by(description) do\n backup @new_resource.path if ::File.exists?(@new_resource.path)\n ::File.open(@new_resource.path, \"w\") {|f| f.write @new_resource.content }\n Chef::Log.info(\"#{@new_resource} contents updated\")\n end\n end\n end", "def content=(value)\n self.temp_files = [value].flatten.select do |f|\n FILE_CLASSES.member?(f.class.name)\n end\n # correctly triggering dirty\n if temp_files.present?\n write_attribute(:content, nil)\n content_will_change!\n else\n write_attribute(:content, value)\n end\n end", "def edit_with_rexml\n require 'rexml/document'\n doc = REXML::Document.new(read)\n yield doc if block_given?\n self.content = doc.to_s\n save\n end", "def set_data_storage_development\n @data_storage_development = DataStorageDevelopment.find(params[:id])\n end", "def update_db(cont)\n db = File.open('./database', 'w')\n db.write(Marshal.dump(cont))\n db.close\n end", "def update!(**args)\n @content = args[:content] if args.key?(:content)\n @debugging_info = args[:debugging_info] if args.key?(:debugging_info)\n @metadata = args[:metadata] if args.key?(:metadata)\n 
@preview_frame_zero = args[:preview_frame_zero] if args.key?(:preview_frame_zero)\n end", "def set_activity_development\n @activity_development = ActivityDevelopment.find(params[:id])\n end", "def write_config\n # Allow disabling the local settings.\n return unless new_resource.local_settings_path\n file new_resource.local_settings_path do\n content new_resource.local_settings_content\n mode '640'\n owner new_resource.parent.owner\n group new_resource.parent.group\n end\n end", "def update_asset_settings!\n AssetSettings[:development].files[@file_id] = @sfiles.map(&:local_url)\n return unless AssetMapper.compile?\n\n AssetSettings[:production].files[@file_id] = @cloud_files.map(&:production_url)\n AssetSettings[:production].files[@file_id] << @tfile.production_url if @tfile.production_url\n\n AssetSettings[:local_assets].files[@file_id] = @cloud_files.map(&:local_url)\n AssetSettings[:local_assets][@file_id] << @tfile.local_url if @tfile.local_url\n end", "def save\n # Nothing in base class. This should be used to persist settings in\n # subclasses that use files.\n end", "def set_child_development\n @child_development = ChildDevelopment.find(params[:id])\n end", "def development\r\n\r\n end", "def edit\n yield if block_given?\n save\n end", "def update!(**args)\n @contents = args[:contents] if args.key?(:contents)\n end", "def update!(**args)\n @contents = args[:contents] if args.key?(:contents)\n end", "def save()\n @env.sync(true)\n self\n end", "def persist!\n ::File.write(self.path, Marshal.dump(self))\n rescue => e\n puts e.message\n exit\n end", "def update\n @welcom_development = WelcomDevelopment.find(params[:id])\n\n respond_to do |format|\n if @welcom_development.update_attributes(params[:welcom_development])\n format.html { redirect_to @welcom_development, notice: 'Welcom development was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @welcom_development.errors, status: :unprocessable_entity }\n end\n end\n end", "def edit_temp(opts = {})\n temp_file do |f|\n f.puts(opts[:temp])\n f.flush\n f.close(false)\n invoke_editor(f.path, opts[:line], true)\n @content = File.read(f.path)\n end\n end", "def persist\n settings = {\n area: @focus[:key]\n }\n File.open(@path, 'w') do |file|\n file.write settings.to_yaml\n end\n end", "def edit_with_nokogiri\n doc = Nokogiri.XML(read)\n yield doc if block_given?\n self.content = doc.to_xml\n save\n end", "def update\n #authorize @development\n respond_to do |format|\n if @development.update(development_params)\n format.html { redirect_to edit_admin_development_path(@development.id), notice: 'Chủ đầu tư đã được chỉnh sửa thành công.' 
}\n format.json { render :edit, status: :ok, location: @development }\n else\n format.html { render :edit }\n format.json { render json: @development.errors, status: :unprocessable_entity }\n end\n end\n end", "def edit\r\n @page = Page.find(params[:id])\r\n data = File.read(\"#{Rails.public_path}/#{@page.project_id}/#{@page.page_name}.html\")\r\n @page.update_attribute(:html , data)\r\n end", "def save_edits\n DATABASE.execute(\"UPDATE boards SET title = '#{@title}', description = '#{@description}' WHERE id = #{@id}\")\n end", "def update!(**args)\n @cleared_funding = args[:cleared_funding] if args.key?(:cleared_funding)\n @debug_funding_text_block = args[:debug_funding_text_block] if args.key?(:debug_funding_text_block)\n @doc_part = args[:doc_part] if args.key?(:doc_part)\n @parse_section = args[:parse_section] if args.key?(:parse_section)\n @source = args[:source] if args.key?(:source)\n end", "def dirty!\n @dirty = true\n end", "def dirty!\n @dirty = true\n end", "def content=(content)\n if GalleryConfig.storage.database_notebooks\n notebookFile = NotebookFile.find_or_initialize_by(save_type: \"stage\", uuid: uuid)\n notebookFile.stage_id = id\n notebookFile.content = content\n notebookFile.save\n else\n File.write(filename, content)\n end\n end", "def contents=(params)\n self.content_needs_saving = true\n contents.each do |content|\n update = params[content.association_name][content.id.to_s]\n content.attributes = update if update\n end\n end", "def update\n respond_to do |format|\n if @blog.update(blog_params)\n @blog.short_body = @blog.body_area.to_plain_text.first(250)\n @blog.save!\n format.html { redirect_to @blog, notice: 'Blog was successfully updated.' }\n format.json { render :show, status: :ok, location: @blog }\n else\n format.html { render :edit }\n format.json { render json: @blog.errors, status: :unprocessable_entity }\n end\n end\n end", "def edit_post(name, content)\n\t\tpost = Post.find_by(post_name: name)\n\t\tpost.content = content\n\t\tpost.save!\n\tend", "def update\n @dirty = true\n end", "def dirty!\n @dirty = true\n end", "def edit(title, content, options={})\n create(title, content, {:overwrite => true}.merge(options))\n end", "def edit\n @prompt = \"save\"\n %[<textarea name='settings' class='form' style='width: 100%; height: 30%;'>#{self.yaml.value || BASIC}</textarea><textarea name='editor' class='form' style='width: 100%; height: 70%;'>#{self.md.value || INDEX}</textarea>];\n end", "def update\n @project.update(project_params)\n @project.update(content: @project.content.strip)\n respond_to do |format|\n if @project.valid?\n format.html {render :edit, notice: 'Project was successfully Saved.'}\n format.json {render :show, status: :ok, location: @project}\n else\n format.html {render :edit}\n format.json {render json: @project.errors, status: :unprocessable_entity}\n end\n end\n end", "def content= (new_content)\n @content = new_content\n end", "def update\n DOCUMENT_PATHS.each do |attr_name, path|\n if path.match(/\\*/)\n instance_variable_get(\"@#{attr_name}\").each do |simple_file_name, contents|\n replace_entry(\"word/#{simple_file_name}.xml\", contents.serialize(save_with: 0))\n end\n else\n xml_document = instance_variable_get(\"@#{attr_name}\")\n replace_entry path, xml_document.serialize(save_with: 0) if xml_document\n end\n end\n end", "def update!(**args)\n @content = args[:content] if args.key?(:content)\n @text_segments = args[:text_segments] if args.key?(:text_segments)\n end", "def update!(**args)\n @contents = args[:contents] if 
args.key?(:contents)\n @default_viewport = args[:default_viewport] if args.key?(:default_viewport)\n @description = args[:description] if args.key?(:description)\n @id = args[:id] if args.key?(:id)\n @name = args[:name] if args.key?(:name)\n @project_id = args[:project_id] if args.key?(:project_id)\n end", "def update\n replace_entry \"word/document.xml\", doc.serialize(:save_with => 0)\n end", "def edit\n load_data\n end", "def save\n if modified? and @entries and [email protected]?\n save!\n end\n end", "def save\n return if @content.nil?\n put_rest \"extra/#{@name}\", @content, :content_type => \"application/octet-stream\"\n end", "def update_file_content(new_content)\n return false if new_content.nil?\n self.file_content = new_content\n self.save\n end", "def update_file_content(new_content)\n return false if new_content.nil?\n self.file_content = new_content\n self.save\n end", "def save_design_doc_on(db)\n update_design_doc(Design.new(design_doc), db)\n end", "def dirty!(software)\n raise ProjectAlreadyDirty.new(self) if culprit\n\n @culprit = software\n end", "def mark_content_clean\n @content_dirty = false\n end", "def edit(host, ip, plataform)\n #verify if we have the plataform name initialized\n if(! (@plataforms.keys.include? plataform))\n add(host,ip,plataform)\n else\n #replace the old with the new host\n if( @plataforms[plataform].edit(Host.new(ip, host, @plataforms[plataform])) )\n puts \"Edited succefully.\"\n end\n end\n\n if(@verbosity)\n puts \"Generating Files ...\"\n end\n\n generateFiles! @plataforms[plataform]\n\n end", "def edit_file(filename, content, options={})\n end", "def set_app()\n self.is_app = 1\n save()\n end", "def update\n respond_to do |format|\n if @activity_development.update(activity_development_params)\n format.html { redirect_to @activity_development, notice: 'Activity development was successfully updated.' }\n format.json { render :show, status: :ok, location: @activity_development }\n else\n format.html { render :edit }\n format.json { render json: @activity_development.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_file_content(new_content)\n return false if new_content.nil?\n self.file_content = new_content\n return self.save\n end", "def do_edit(names)\n regenerate_file(path, 0644) do |new, old|\n if old\n old.each_line do |line|\n new.write(yield(line))\n end\n\n else\n new.write(<<END\n127.0.0.1 #{(names + %w(localhost)).join(' ')}\n::1 #{(names + %w(localhost ip6-localhost ip6-loopback)).join(' ')}\nEND\n )\n end\n end\n end", "def autosave; end", "def set_embedded_config_data\n @embedded = 1\n end", "def set_embedded_config_data\n @embedded = 1\n end", "def saveEdit\n @project_edit = @project.project_edits.create(description: edit_params[:project_edit])\n @project_edit.user = current_user\n puts @project_edit.description\n respond_to do |format|\n if @project_edit.save\n format.html { redirect_to @project, notice: 'Project was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @project }\n else\n format.html { render :edit }\n format.json { render json: @project.errors, status: :unprocessable_entity }\n end\n end\n\n end", "def update\n @setting.value = [email protected]\n\n @setting.save\n head 200, content_type: \"text/html\"\n end", "def save_design_doc!\n save_design_doc(true)\n end", "def make_save_contents\n\t\t\tcontents = local_make_save_contents\n\t\t\tcontents[:self_vars] = $game_selfVars\n\t\t\tcontents\n\t\tend", "def set_acquisition_development_maintenance\n @acquisition_development_maintenance = AcquisitionDevelopmentMaintenance.find(params[:id])\n end", "def alter_schemata\n end", "def alter_schemata\n end", "def compact\n load\n with_tmpfile do |path, file|\n # Compactified database has the same size -> return\n return self if @pos == file.write(dump(yield, @format.header))\n with_flock(File::LOCK_EX) do\n # Database was replaced (cleared or compactified) in the meantime\n if @pos != nil\n # Append changed journal records if the database changed during compactification\n file.write(read)\n file.close\n File.rename(path, @file)\n end\n end\n end\n open\n replay\n end", "def save\n return if saved?\n self.saved = true\n original_path = interpolate_path(:original)\n stream.write_to(original_path)\n end", "def save_design_doc\n reset_design_doc unless design_doc_fresh\n self.design_doc = update_design_doc(design_doc)\n end", "def update\n respond_to do |format|\n if @child_development.update(child_development_params)\n format.html { redirect_to @child_development, notice: 'Child development was successfully updated.' }\n format.json { render :show, status: :ok, location: @child_development }\n else\n format.html { render :edit }\n format.json { render json: @child_development.errors, status: :unprocessable_entity }\n end\n end\n end", "def update!(**args)\n @content = args[:content] if args.key?(:content)\n end", "def update!(**args)\n @content = args[:content] if args.key?(:content)\n end", "def update!(**args)\n @content = args[:content] if args.key?(:content)\n end", "def update!(**args)\n @content = args[:content] if args.key?(:content)\n end", "def edit_file(filename, editor, initial_text)\n File.open(filename, 'wb') { |f| f.write initial_text }\n\n editor_thread = open_editor nil, editor, filename\n\n trap(\"INT\") { on_signal filename, editor_thread, nil }\n trap(\"ABRT\") { on_signal filename, editor_thread, nil }\n trap(\"QUIT\") { on_signal filename, editor_thread, nil } unless @on_windows\n\n editor_thread.join\n\n text = File.open(filename, 'rb') { |f| f.read }\n cleanup filename\n\n text\n end", "def update!\n parse\n return true if skipped?\n attributes = Attributes.new klass, @blocks[:attributes]\n attributes.update!\n @blocks[:attributes] = attributes.lines\n update_file\n end", "def preprocess_and_save\n process_attrs_before_write(self)\n return self.save()\n end", "def preprocess_and_save\n process_attrs_before_write(self)\n return self.save()\n end", "def set_rails()\n self.is_rails = 1\n save()\n end", "def update\n save_doc\n end", "def designer\r\n @page = Page.find(params[:id])\r\n data = File.read(\"#{Rails.public_path}/#{@page.project_id}/#{@page.page_name}.html\")\r\n @page.update_attribute(:html , data)\r\n render 'designer'\r\n end", "def reset!\n self.draft = find_original_file!.read\n save!\n end", "def object_edit(field)\n value = data_type_editor(field, AppConfig.data_type[field], AppConfig.initial[field])\n AppConfig.config[field] = value unless value.nil?\n end", 
"def save_design_doc(force = false)\n update_design_doc(force)\n end", "def update!(**args)\n @contents = args[:contents] if args.key?(:contents)\n @path = args[:path] if args.key?(:path)\n end", "def update\n respond_to do |format|\n if @development_work.update(development_work_params)\n format.html { redirect_to development_works_path(page: prev_page), notice: 'Development work was successfully updated.' }\n format.json { render :show, status: :ok, location: @development_work }\n else\n format.html { render :edit }\n format.json { render json: @development_work.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_web()\n self.is_web = 1\n self.save()\n end", "def set_DB()\n self.is_db = 1\n save()\n end", "def edit\n @base64_input = @project.input\n @output = @project.output\n end", "def undercase_ext_set(ext, transfer, force)\n puts \"Lowering extension: \".green + (ext.empty?? \"*\" : ext)\n\n Modder.status transfer\n return if transfer.empty?\n\n # confirm current changes\n if force || Modder.confirm?\n final = {}\n temp = {}\n\n # create hash temp map\n transfer.each do |k, v|\n tempfile = (v.hash * v.object_id).abs.to_s\n final[tempfile] = v\n temp[k] = tempfile\n end\n\n Modder.execute temp\n Modder.execute final\n puts \"Modifications complete.\"\n else\n puts \"No modifications done.\"\n end\n end", "def set_content(value)\n @document = Nokogiri::XML(value)\n @state = RedXmlResource::STATE_UNCOMMITED if @document #Resource can now be stored in database\n end" ]
[ "0.56714255", "0.5660797", "0.56165534", "0.55975044", "0.5422964", "0.52603084", "0.5232118", "0.5193916", "0.51735365", "0.51671183", "0.51043427", "0.5090216", "0.5084106", "0.50835913", "0.5033807", "0.5005091", "0.4988624", "0.49694535", "0.4950044", "0.49338463", "0.49156815", "0.49037737", "0.4876145", "0.48652282", "0.4856782", "0.4856782", "0.48507717", "0.48341438", "0.48255143", "0.48229963", "0.48117", "0.47801366", "0.47708973", "0.47550097", "0.4747102", "0.47358042", "0.47308156", "0.47308156", "0.4719951", "0.4719059", "0.47178626", "0.47029397", "0.4696607", "0.46915463", "0.46880552", "0.4685393", "0.46745434", "0.46606115", "0.46598053", "0.46539617", "0.4641308", "0.46401685", "0.463255", "0.46322086", "0.46193314", "0.4618988", "0.4618988", "0.46170163", "0.46160942", "0.46144548", "0.4609324", "0.46015093", "0.45897174", "0.45785227", "0.45783713", "0.45755625", "0.45752564", "0.45726004", "0.45726004", "0.45701376", "0.4569465", "0.4563755", "0.456345", "0.45617935", "0.45603335", "0.45603335", "0.4559948", "0.45537937", "0.45533293", "0.45527658", "0.45525053", "0.45525053", "0.45525053", "0.45525053", "0.45495862", "0.45423576", "0.45397118", "0.4539028", "0.45299435", "0.4529795", "0.45258498", "0.45189375", "0.45135283", "0.45036834", "0.44979706", "0.4494514", "0.44934767", "0.4489105", "0.44833645", "0.44790938", "0.44783172" ]
0.0
-1
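Every record shown so far carries document_score 0.0 and document_rank -1, which presumably marks rows whose positive document never appeared in the ranked candidate list. Splitting the data on that flag is straightforward; this sketch reuses the same hypothetical ds handle loaded above:

```python
# document_rank is stored as a string; "-1" is one of its 102 classes.
unranked = ds.filter(lambda row: row["document_rank"] == "-1")
ranked = ds.filter(lambda row: row["document_rank"] != "-1")
print(len(unranked), "rows without a ranked positive;", len(ranked), "with one")
```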
"From" values in the edit are different from the current values of the development attributes. This doesn't necessarily invalidate the entire edit, but needs to be taken into account.
def conflict? from_values.select{ |d,e| d != e }.any? end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_values\n d_attrs = development.reload.fields\n self.fields.map{ |field|\n name = field.fetch('name').to_s\n edit_from = field.fetch('from').to_s\n devel_from = d_attrs.fetch( name )\n [ devel_from, edit_from ]\n }\n end", "def destination_replace_from\n @attributes[:destination_replace_from]\n end", "def original_attributes\n @record.attributes.symbolize_keys.select do |(attr, _)|\n !%i(created_at updated_at).include?(attr)\n end\n end", "def version_attributes\n attributes = super\n\n if @reverted_from.nil?\n attributes\n else\n attributes.merge(:reverted_from => @reverted_from)\n end\n end", "def can_modify\n\t\tself.changed_attributes.each do |attr|\n\n\t\t\tif attr.to_s == \"reports\"\n\t\t\t\tself.reports.each do |r|\n\t\t\t\t\tunless r.changed_attributes.blank?\n\t\t\t\t\t\tif r.owner_ids.include? self.created_by_user_id\n\t\t\t\t\t\telsif r.owner_ids.include? self.created_by_user.organization.id.to_s\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\tself.errors.add(:reports,\"You cannot edit #{attr.name.to_s}\")\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\n\t\t\telsif attr.to_s == \"recipients\"\n\t\t\t\trecipients_changed\n\t\t\telsif attr.to_s == \"payments\"\n\t\t\t\told_payment_not_deleted\n\t\t\telse\n\t\t\t\t## only in case of \n\t\t\t\tif self.owner_ids.include? self.created_by_user.id.to_s\n\t\t\t\telsif self.owner_ids.include? self.created_by_user.organization.id.to_s\n\t\t\t\telse\n\t\t\t\t\tself.errors.add(:owner_ids,\"You cannot edit the field: #{attr.to_s}\")\n\t\t\t\tend\n\t\t\tend\n\n\t\tend\n\tend", "def editable_attribute_names; super + additional_fields end", "def editable_attribute_names; super + additional_fields end", "def editable_attribute_names; super + additional_fields end", "def from_ranges\n attributes.fetch(:fromRanges)\n end", "def fields_not_to_clean\n ['deleted_record','record_before','record_after']\n end", "def before_validation()\n logger.debug \"Vediamo di mettere mittente a riclife se non c'e'..\"\n self.from = User.default_system_user unless attribute_present?(\"from_id\") # unless User.find_by_login('riclife') unless \n end", "def clean_attributes\n @attribute_changes = {}\n end", "def permitted_attributes_for_update\n return permitted_attributes if record.user_id == user.id\n permitted_attributes.reject do |attr|\n [:start_date, :due_date, :estimated_time].include? attr\n end\n end", "def uncopied_attributes\n superclass.uncopied_attributes | [:chart_code, :number, :effective_date]\n end", "def from=(value)\n @from = value\n end", "def from=(value)\n @from = value\n end", "def global_changes(attributes_before_edit)\n\n # get a copy of the freshest attributes\n fresh_record_attributes = get_accessible_attributes\n\n # find any differences and return those keys\n (fresh_record_attributes.diff attributes_before_edit).keys\n end", "def changed_attributes\r\n attributes = HashWithIndifferentAccess.new\r\n \r\n if self.edited\r\n %w{rate name}.each do |attr|\r\n if self.different?(attr)\r\n attributes[attr] = self.send(attr)\r\n end\r\n end\r\n end\r\n attributes\r\n end", "def local_changes(attributes_to_save, attributes_before_edit)\n (attributes_to_save.diff attributes_before_edit).keys\n end", "def use_provided_modified_at\n @attributes[:use_provided_modified_at]\n end", "def time_valid\n if self.valid_from and self.valid_to then\n errors.add(:valid_from,\n \"must be before the valid to date. 
#{self.valid_from} >= #{self.valid_to}\") unless self.valid_from < self.valid_to\n else\n if self.valid_from or self.valid_to then\n errors.add(:valid_from,\n \" must be set when valid_to is set.\") unless self.valid_from\n errors.add(:valid_to,\n \" must be set when valid_from is set.\") unless self.valid_to\n end\n end\n end", "def changed\n self.class.fields.select do |field|\n field[0] != '_' and self._old_values[field] != self.send(field)\n end\n end", "def valid_from\n utc_timestamp_from('valid_from')\n end", "def dirty_attributes\n changed.inject({}) { |h, k| h[k] = attributes[k.to_s]; h }\n end", "def old_attributes\n (audited_changes || {}).inject({}.with_indifferent_access) do |attrs, (attr, values)|\n attrs[attr] = Array(values).first\n attrs\n end\n end", "def visible_changes\n changes.select {|key, (from, to)| (from.present? || to.present?) && (from.to_s != to.to_s) }\n end", "def display_attributes\n super.reject { |k, v| imported_attributes.fetch(k, nil) == v }\n end", "def display_attributes\n super.reject { |k, v| imported_attributes.fetch(k, nil) == v }\n end", "def editable_fields(obj)\n \tobj.attributes.select {|key| key.start_with? \"need_to_edit_\"}\n end", "def clean_dirty_attributes!\n @dirty_attribute_keys = []\n end", "def prevent_base_locale_from_changing\n errors.add(:base_rfc5646_locale, :readonly) if base_rfc5646_locale_changed?\n end", "def form_attributes\n sanitized_attributes = {}.with_indifferent_access\n attributes.reject { |attr| attr =~ /_at|version/ }.each do |key, value|\n sanitized_key = key == '_id' ? 'id' : key\n sanitized_attributes[sanitized_key] = value.to_s\n end\n sanitized_attributes\n end", "def original_attributes\n {}.with_indifferent_access.merge(attributes).merge(changed_attributes)\n end", "def original_attributes_before_type_cast\n ensure_original_attributes_stored\n clone_attributes :read_original_attribute_before_type_cast\n end", "def valid_from(from_time)\n add_field('authValidFrom', from_time.to_i.to_s + '000')\n end", "def from_truncated\n truncate_datetime(from)\n end", "def changed_columns\n cc = super\n cc = cc.dup if frozen?\n deserialized_values.each{|c, v| cc << c if !cc.include?(c) && original_deserialized_value(c) != v} \n cc\n end", "def validate_from\n @from = validate_address(FROM)\n end", "def valid_from(from_time)\n add_field('authValidFrom', from_time.to_i.to_s + '000')\n end", "def from=(from)\n write_attr :from, from\n end", "def originalsourceform; end", "def edit_entry_attrs\n test_entry_attrs\n end", "def set_old_values\n self._old_values = {}\n\n self.class.fields.each do |field|\n self._old_values[field] = self.send(field) unless field[0] == '_'\n end\n end", "def uncopied_attributes\n base_uncopied_attributes | [:labor_benefit_rate_category_code, :major_reporting_category_code]\n end", "def expand_changes(attrs)\n attrs = attrs.symbolize_keys.tap { |h| self.class.coerce(h) }\n @dirty_attributes = @dirty_attributes.merge(attrs).freeze # melt and freeze, huh\n @all_attributes = nil # it is not valid anymore\n\n self.class.validate_presence_of_proper_attributes(@base_attributes, @dirty_attributes)\n end", "def resent_from( val = nil )\n default :resent_from, val\n end", "def permitted_attributes\n [:user_id, :code, :name, :status_id, :group_id, :contact_id, :body,\n :address, :hours, :notes, :live_date, :website, :rss, :twitter, :wiki,\n :logo,\n contact_attributes: %i[id name hidden email phone notes user_id _destroy]]\n end", "def validations_for_diff\n []\n end", "def update_from(other)\n @name =
other.name\n @position = other.position\n @topic = other.topic\n @recipients = other.recipients\n @bitrate = other.bitrate\n @user_limit = other.user_limit\n @permission_overwrites = other.permission_overwrites\n @nsfw = other.nsfw\n @parent_id = other.parent_id\n @rate_limit_per_user = other.rate_limit_per_user\n end", "def age_from_fits_age_to\n return if age_from && age_to && age_from <= age_to\n errors.add :age_from, I18n.t('offer.validations.age_from_be_smaller')\n end", "def valid_attributes\n { \"amount\"=>1245.to_s, 'book_id'=>@od.to_param,\n \"narration\"=>'Premier virement', \"date\"=>Date.today.to_formatted_s('%d-%m-%Y'),\n :compta_lines_attributes=>{'0'=>{account_id:@ba.to_param}, '1'=>{account_id:@bb.to_param}}\n }\n end", "def is_editable\n not is_default_cash_gift\n end", "def from\n attributes.fetch(:from)\n end", "def changed?; not pristine?; end", "def ensure_editable\n errors.add(:base, I18n.t('settler.errors.editable', :default => 'Setting cannot be changed')) if changed? && !editable?\n end", "def invalid_modifiers\n @invalid_modifiers = end_product_establishment_source.invalid_modifiers\n end", "def override\n attributes.override\n end", "def ensure_editable; return false unless editable? end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def list_invalid_properties\n invalid_properties = super\n invalid_properties\n end", "def register_attributes\n [:from , :to]\n end", "def original_attributes\n ensure_original_attributes_stored\n clone_attributes :read_original_attribute\n end", "def filter_illegal_changes\n return unless params[:description].is_a?(ActionController::Parameters)\n\n root = in_admin_mode?\n admin = @description.is_admin?(@user)\n author = @description.author?(@user)\n\n params[:description].delete(:source_type) unless root\n unless root ||\n ((admin || author) &&\n # originally was\n # (desc.source_type != \"project\" &&\n # desc.source_type != \"project\"))\n # see https://www.pivotaltracker.com/story/show/174566300\n @description.source_type != \"project\")\n params[:description].delete(:source_name)\n end\n params[:description].delete(:license_id) unless root || admin || author\n end", "def no_best_time_entered(attributes)\n if attributes['source_location'].blank?
&& attributes['formatted_value'].blank?\n if attributes['id'].present?\n attributes['_destroy'] = 1\n false\n else\n true\n end\n else\n false\n end\n end", "def edit\n @instance.attributes.each do |k,v|\n if v.nil?\n @instance[k][email protected]_class_set[k]\n end\n end\n @instance.start_time = @instance.start_time.to_s(:time)\n @instance.end_time = @instance.end_time.to_s(:time)\n end", "def should_be_partially_valid_except(*attrs)\n invalid_attributes = []\n all_attrs = self.instance_variables.map {|iv| iv.gsub('@','')}\n # use the key names from @attributes for ActiveRecord objects\n all_attrs = self.attributes.keys if self.instance_variables.include?('attributes')\n attrs.each do |attr_name|\n invalid_attributes << attr_name.to_s\n end\n should_be_valid_on = all_attrs - invalid_attributes\n should_be_partially_valid_on(should_be_valid_on)\n end", "def prepare_reuse_validation_diff(other)\n original = self.reload!.comparable\n reference = other.reload!.comparable\n unique_identifiers.each { |id| reference[id] = original[id] }\n [original, reference]\n end", "def copy_from(arg)\n time_entry = arg.is_a?(TimeEntry) ? arg : TimeEntry.visible.find(arg)\n self.attributes = time_entry.attributes.dup.except(\"id\", \"created_on\", \"updated_on\")\n self.custom_field_values = time_entry.custom_field_values.inject({}) {|h,v| h[v.custom_field_id] = v.value; h}\n # @copied_from = time_entry\n self\n end", "def getValidAndInvalidAttributes\n @valid_attributes = []\n\n @valid_attributes_from_db.each_with_index do |attr_from_db, i| \n if @excel_attributes.include? attr_from_db[:name]\n @valid_attributes << attr_from_db\n end\n end\n\n x_attr = []\n @valid_attributes.each {|x| x_attr << x[:name]}\n @invalid_attributes = @excel_attributes - x_attr\n end", "def previous_changes_clean\n excluded_params = [:updated_at]\n excluded_params << :reset_at unless previous_changes[:reset_amount]\n previous_changes.except(*excluded_params)\n end", "def diff_edits_from_empty\n diff_edits(true)\n end", "def form_attributes\n attributes - read_only_attributes\n end", "def is_not_editable!\n update_attribute :is_editable, false\n end", "def fields_need_editing(obj) \n \teditable_fields(obj).select {|k,v| v == true }\n end", "def linked_editing_range_provider\n attributes.fetch(:linkedEditingRangeProvider)\n end", "def filter_attributes(record)\n self.changed_keys ||= []\n if !record.auditable_attributes.nil?\n self.changed_keys &= record.auditable_attributes\n end\n if !record.unauditable_attributes.nil?\n self.changed_keys -= record.unauditable_attributes\n end\n self.changed_keys -= %w{updated_at created_at _id}\n self.changed_keys\n end", "def sanitize_draft_errors(draft)\n if draft.errors.any?\n errors = draft.errors.dup\n draft.errors.clear\n errors.each{|attr, message|\n if attr == 'outbounds_phone_number_phone_number_digits'\n draft.errors.add(:outbounds_phone_number, message)\n elsif !(attr =~ /^outbounds_phone_number/) && !(attr == 'outbounds' && message == 'is invalid')\n draft.errors.add(attr, message)\n end\n }\n end\n end", "def monto_utilizado_devoluciones\n credito_restante = notas_creditos_debito.credito_restante\n credito_restante += monto_utilizado_was if persisted?
# sumar el monto_utilizado anterior si se esta editando\n errors.add(:monto_utilizado, I18n.t('activerecord.errors.messages.credito_superior_a_disponible')) if credito_restante < monto_utilizado\n errors.add(:monto_utilizado, I18n.t('activerecord.errors.messages.monto_utilizado_cero')) if monto_utilizado <= 0\n false if errors.size > 0\n end", "def valid_attributes\n {\n :executed => 1.day.ago,\n :sequence_source_id => sequence_source.id,\n }\n end", "def valid_to\n utc_timestamp_from('valid_to')\n end", "def audited_attributes\n attributes.except(*non_audited_columns.map(&:to_s))\n end", "def clear_changed_attributes\n\t\t\t \t\t$TRACE.debug 5, \"clear_changed_attributes\"\n\t\t\t \tself.changed_attributes_aado = []\n\t\t\t \tend", "def sanitize_for_mass_assignment(attrs)\n return attrs\n end", "def dirty_attributes\n dirty_attributes = {}\n properties = self.properties\n\n original_values.each do |name, old_value|\n property = properties[name]\n new_value = property.get!(self)\n\n dirty = case property.track\n when :hash then old_value != new_value.hash\n else\n property.value(old_value) != property.value(new_value)\n end\n\n if dirty\n property.hash\n dirty_attributes[property] = property.value(new_value)\n end\n end\n\n dirty_attributes\n end", "def valid_attributes\n {\n start_date: Date.new(2013, 01, 20),\n end_date: Date.new(2013, 02, 20),\n onsite: false,\n name: \"Early Competitor\",\n registrant_type: \"competitor\",\n expense_item_attributes: {\n cost: @comp_exp.cost,\n tax: @comp_exp.tax\n }\n }\n end", "def validation_origin_destination\n errors.add(:base,\"Origin and Destination must be different\") if locale_origin_id == locale_destination_id # Rails3 slightly different?\n end", "def address_attributes\n attributes.delete_if {|key, value| [\"id\", 'updated_at', 'created_at'].any?{|k| k == key }}\n end", "def address_attributes\n attributes.delete_if {|key, value| [\"id\", 'updated_at', 'created_at'].any?{|k| k == key }}\n end", "def changes; self.diff @pristine; end", "def updatable_attributes\n [:name]\n end", "def processed_from\n ObjectPatch::Pointer.parse(@from)\n end", "def changed_attributes\n @changed_attributes ||= {}.with_indifferent_access\n end", "def invalidated_on_change_of\r\n [:size]\r\n end", "def valid_attributes\n { \"_id\" => 1,\n \"stade\" => \"projet\",\n \"code\" => '1',\n \"description\" => 'd',\n \"date_debut\" => '2013.01.01',\n \"duree_projet\" => 8,\n \"type_produit\" => :back_office,\n \"duree_vie\" => 10,\n \"publie\" => false }\n end", "def recipients_from_original_to\n @recipients_from_original_to ||= unquoted_address_header('x-original-to')\n end" ]
[ "0.69445574", "0.5483634", "0.5477963", "0.5458728", "0.5289405", "0.5284324", "0.5284324", "0.5284324", "0.5272686", "0.5270673", "0.5227608", "0.52250606", "0.5209125", "0.52060586", "0.51923555", "0.51923555", "0.51895595", "0.5185874", "0.51841074", "0.5177107", "0.5164773", "0.5157362", "0.51469076", "0.512662", "0.5106723", "0.5079389", "0.5064964", "0.5064964", "0.5057178", "0.50330645", "0.502315", "0.5004072", "0.50004035", "0.49890336", "0.4979702", "0.49751538", "0.49695748", "0.49619833", "0.49574563", "0.49550483", "0.495444", "0.4945831", "0.4903748", "0.4902305", "0.4900545", "0.4900399", "0.49000633", "0.48729292", "0.48569152", "0.48555034", "0.48421064", "0.48305646", "0.4823297", "0.48090822", "0.48082602", "0.4805508", "0.48031798", "0.48008493", "0.47961807", "0.47961807", "0.47961807", "0.47961807", "0.47961807", "0.47961807", "0.47961807", "0.47961807", "0.47926888", "0.47916806", "0.47882026", "0.47753134", "0.47714233", "0.4768557", "0.47639275", "0.4762752", "0.47535524", "0.47518778", "0.4747979", "0.47464436", "0.474634", "0.4745791", "0.4729058", "0.47172198", "0.4715322", "0.47149104", "0.47068796", "0.47042832", "0.4694014", "0.4684497", "0.46830985", "0.46786332", "0.46786043", "0.46772483", "0.46708193", "0.46708193", "0.46690843", "0.46679533", "0.46608263", "0.46568567", "0.4652297", "0.4644689", "0.46409816" ]
0.0
-1
Returns pairs of "from" values, from development and edit, in that order. All values are strings. TODO: May want to make each edited field its own model, to better enforce the schema.
def from_values d_attrs = development.reload.fields self.fields.map{ |field| name = field.fetch('name').to_s edit_from = field.fetch('from').to_s devel_from = d_attrs.fetch( name ) [ devel_from, edit_from ] } end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changes\n Hash[(from.keys + to.keys).collect do |key|\n [key, [from[key], to[key]]]\n end]\n end", "def assignable_fields\n names = fields.map{|f| f.fetch \"name\" }\n to_values = fields.map{|f| f.fetch \"to\" }\n Hash[names.zip(to_values)]\n end", "def fields_for_display\n attributes = self.relevant_fields\n attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]}\n end", "def fields_for_display\n attributes = self.relevant_fields\n attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]}\n end", "def fields_for_display\n attributes = self.relevant_fields\n attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]}\n end", "def local_changes(attributes_to_save, attributes_before_edit)\n (attributes_to_save.diff attributes_before_edit).keys\n end", "def default_fields_for_forms\n [\n { name: :get_verbose_conflicting_row, width: 50,\n label: I18n.t('admin_import.conflicting_row')\n },\n { name: :import_text, flex: 1, label: I18n.t('admin_import.import_text') },\n\n { name: :data_import_meeting_program__get_short_name, width: 210,\n label: I18n.t('activerecord.models.data_import_meeting_program')\n },\n { name: :meeting_program__get_short_name, width: 110,\n label: I18n.t('activerecord.models.meeting_program')\n },\n { name: :data_import_team__name, width: 110, label: I18n.t('activerecord.models.data_import_team') },\n { name: :team__name, width: 110, label: I18n.t('activerecord.models.team') },\n\n { name: :rank, width: 30,\n label: I18n.t('activerecord.attributes.meeting_relay_result.rank')\n },\n { name: :is_play_off, width: 50, default_value: true, unchecked_value: 'false',\n label: I18n.t('activerecord.attributes.meeting_relay_result.is_play_off')\n },\n { name: :is_out_of_race, width: 50, default_value: false, unchecked_value: 'false',\n label: I18n.t('activerecord.attributes.meeting_relay_result.is_out_of_race')\n },\n { name: :is_disqualified, width: 50, default_value: false, unchecked_value: 'false',\n label: I18n.t('activerecord.attributes.meeting_relay_result.is_disqualified')\n },\n { name: :disqualification_code_type__i18n_short, width: 50,\n label: I18n.t('activerecord.models.result_type')\n },\n { name: :standard_points, width: 50,\n label: I18n.t('activerecord.attributes.meeting_relay_result.standard_points')\n },\n { name: :meeting_points, width: 50,\n label: I18n.t('activerecord.attributes.meeting_relay_result.meeting_points')\n },\n { name: :minutes, width: 30, label: I18n.t('activerecord.attributes.meeting_relay_result.minutes') },\n { name: :seconds, width: 30, label: I18n.t('activerecord.attributes.meeting_relay_result.seconds') },\n { name: :hundreds, width: 30, label: I18n.t('activerecord.attributes.meeting_relay_result.hundreds') }\n ]\n end", "def fields_for_query\n self.class.fields_coercions.keys.each_with_object({}) do |field_name, results|\n results[field_name] = @fields.each_with_object({}) do |(locale, fields), field_results|\n field_results[locale] = get_value_from(fields, field_name)\n end\n end\n end", "def fields_for_query\n self.class.fields_coercions.keys.each_with_object({}) do |field_name, results|\n results[field_name] = @fields.each_with_object({}) do |(locale, fields), field_results|\n field_results[locale] = get_value_from(fields, field_name)\n end\n end\n end", "def global_changes(attributes_before_edit)\n\n # get a copy of the freshest attributes\n fresh_record_attributes = get_accessible_attributes\n\n # find any differences and return those keys\n (fresh_record_attributes.diff 
attributes_before_edit).keys\n end", "def diff_edits(diff_from_empty=false)\n edit_diffs = Hash.new\n e = Errata.find(errata.id)\n\n # synopsis field needs a special case due to being rewritten before save...\n get_errata = {\n :synopsis => lambda { |e,_| synopsis_preview(e) },\n :default => lambda { |e,field| e.send(field) }\n }\n get_params = {\n :synopsis => lambda { |_| synopsis_preview_from_params },\n :default => lambda { |field| params[:advisory][field] }\n }\n\n # NOTE: Not sure why only these fields are checked, other\n # changes like keywords, cross_references, idsfixed etc might\n # also cause the docs output to change. Is this a feature?\n [:synopsis, :topic, :description, :solution].each do |field|\n errata_read = get_errata[field] || get_errata[:default]\n params_read = get_params[field] || get_params[:default]\n if diff_from_empty\n # This part is a little confusing.\n # Compare the unedited value with an empty string.\n old = ''\n new = errata_read.call(e, field)\n else\n # This part makes more sense.\n # Compare the unedited value with the new edited value.\n old = errata_read.call(e, field)\n new = params_read.call(field)\n end\n next unless new\n text_diff = diff_as_string(old,new)\n unless text_diff.empty?\n edit_diffs[field] = text_diff\n end\n end\n edit_diffs\n end", "def edits\n @edits ||= {}\n end", "def changed_fields\n items = @resource.changed_from_previous_curated\n return ['none'] unless items.present?\n\n items\n end", "def fields\n %i[ position_title employee_type request_type contractor_name\n number_of_positions hourly_rate hours_per_week number_of_weeks annual_cost\n nonop_funds nonop_source justification organization__name unit__name\n review_status__name review_comment user__name created_at updated_at ]\n end", "def get_updates\n # TODO Rewrite this properly\n @_original ||= {}\n original_data = {}\n self.class.attributes.each_pair do |name, attrib|\n attrib.set(original_data, @_original[name])\n end\n updates = {}\n original_data.each_pair do |field, value|\n updates[field] = data[field] if data[field] != original_data[field]\n end\n updates\n end", "def fields\n @lhs.fields + (@rhs.fields - @rhs.keys).map{|f| \n @lhs.fields.include?(f) ? \"rhs.#{f}\" : f\n }\n end", "def form_fields\n values = super\n result = {}\n mappings.values.each { |field|\n result[field] = values[field] if values[field]\n }\n result\n end", "def edtf\n [\n from.send(from.respond_to?(:edtf) ? :edtf : :to_s),\n to.send(to.respond_to?(:edtf) ? :edtf : :to_s)\n ] * '/'\n end", "def visible_changes\n changes.select {|key, (from, to)| (from.present? || to.present?) && (from.to_s != to.to_s) }\n end", "def editable_fields(obj)\n \tobj.attributes.select {|key| key.start_with? 
\"need_to_edit_\"}\n end", "def destination_replace_from\n @attributes[:destination_replace_from]\n end", "def version_attributes\n attributes = super\n\n if @reverted_from.nil?\n attributes\n else\n attributes.merge(:reverted_from => @reverted_from)\n end\n end", "def default_fields_for_forms\n [\n { name: :get_verbose_conflicting_row, width: 50,\n label: I18n.t('admin_import.conflicting_row')\n },\n { name: :import_text, flex: 1, label: I18n.t('admin_import.import_text') },\n\n { name: :event_order, width: 50,\n label: I18n.t('activerecord.attributes.meeting_program.event_order')\n },\n { name: :begin_time, width: 80, xtype: 'datecolumn', format: 'H:i',\n label: I18n.t('activerecord.attributes.meeting_program.begin_time')\n },\n { name: :notes, label: I18n.t(:notes) },\n\n { name: :data_import_meeting_session__get_full_name, width: 160,\n label: I18n.t('activerecord.models.data_import_meeting_session')\n },\n { name: :meeting_session__get_full_name, width: 110,\n label: I18n.t('activerecord.models.meeting_session')\n },\n { name: :event_type__i18n_short, width: 110,\n label: I18n.t('activerecord.models.event_type')\n },\n { name: :category_type__short_name, width: 60,\n label: I18n.t('activerecord.models.category_type')\n },\n # Base timings:\n { name: :minutes, width: 30, label: I18n.t('activerecord.attributes.meeting_program.minutes') },\n { name: :seconds, width: 30, label: I18n.t('activerecord.attributes.meeting_program.seconds') },\n { name: :hundreds, width: 30, label: I18n.t('activerecord.attributes.meeting_program.hundreds') },\n { name: :time_standard__get_timing, width: 110,\n label: I18n.t('activerecord.models.time_standard')\n },\n { name: :heat_type__i18n_description, width: 80,\n label: I18n.t('activerecord.models.heat_type')\n }\n ]\n end", "def fields\n %i[ request_model_type position_title employee_type request_type\n contractor_name employee_name annual_cost_or_base_pay\n nonop_source justification organization__name unit__name\n review_status__name review_comment user__name created_at updated_at ]\n end", "def fields\n %i[ position_title employee_type request_type\n contractor_name number_of_months annual_base_pay\n nonop_funds nonop_source justification organization__name unit__name\n review_status__name review_comment user__name created_at updated_at ]\n end", "def modifications\n\t\treturn unless self.modified?\n\t\tself.log.debug \"Gathering modifications...\"\n\n\t\tmods = []\n\t\[email protected]_by {|k, _| k.to_s }.each do |attribute, vals|\n\t\t\tself.log.debug \" finding mods for %s\" % [ attribute ]\n\t\t\tmod = self.diff_with_entry( attribute, vals ) or next\n\t\t\tmods << mod\n\t\tend\n\n\t\treturn mods\n\tend", "def changed_attributes\r\n attributes = HashWithIndifferentAccess.new\r\n \r\n if self.edited\r\n %w{rate name}.each do |attr|\r\n if self.different?(attr)\r\n attributes[attr] = self.send(attr)\r\n end\r\n end\r\n end\r\n attributes\r\n end", "def get_entered_fields\n @entered_fields = get_used_fields_only(@contact_form_field)\n @entered_fields\n end", "def from_ranges\n attributes.fetch(:fromRanges)\n end", "def mutable_fields\n # everything but 0 (id) and 1 (scraped_at)\n to_a[2..-1]\n end", "def default_fields_for_forms\n [\n { :name => :le_title__get_full_name, :field_label => I18n.t(:le_title, {:scope=>[:activerecord, :models]}),\n # [20121121] For the combo-boxes to have a working query after the 4th char is entered in the edit widget,\n # a lambda statement must be used. 
Using a pre-computed scope from the Model class prevents Netzke\n # (as of this version) to append the correct WHERE clause to the scope itself (with an inline lambda, instead, it works).\n :scope => lambda { |rel| rel.order(\"name ASC\") }\n },\n { :name => :name, :field_label => I18n.t(:name) },\n { :name => :surname, :field_label => I18n.t(:surname) },\n { :name => :le_contact_type__get_full_name, :field_label => I18n.t(:le_contact_type, {:scope=>[:activerecord, :models]}),\n # [20121121] See note above for the sorted combo boxes.\n :scope => lambda { |rel| rel.order(\"name ASC\") }\n },\n { :name => :is_suspended, :field_label => I18n.t(:is_suspended),\n :default_value => false, :unchecked_value => 'false',\n :field_style => 'min-height: 13px; padding-left: 13px;'\n },\n { :name => :address, :field_label => I18n.t(:address) },\n { :name => :le_city__get_full_name, :field_label => I18n.t(:le_city, {:scope=>[:activerecord, :models]}),\n # [20121121] See note above for the sorted combo boxes.\n :scope => lambda { |rel| rel.order(\"name ASC, area ASC\") }\n },\n { :name => :tax_code, :field_label => I18n.t(:tax_code) },\n { :name => :vat_registration, :field_label => I18n.t(:vat_registration) },\n { :name => :date_birth, :field_label => I18n.t(:date_birth) },\n { :name => :phone_home, :field_label => I18n.t(:phone_home) },\n { :name => :phone_work, :field_label => I18n.t(:phone_work) },\n { :name => :phone_cell, :field_label => I18n.t(:phone_cell) },\n { :name => :phone_fax, :field_label => I18n.t(:phone_fax) },\n { :name => :e_mail, :field_label => I18n.t(:e_mail) },\n\n { :name => :date_last_met, :field_label => I18n.t(:date_last_met) },\n { :name => :notes, :field_label => I18n.t(:notes), :width => 200 },\n { :name => :personal_notes, :field_label => I18n.t(:personal_notes), :width => 200 },\n { :name => :family_notes, :field_label => I18n.t(:family_notes), :width => 200 }\n ]\n end", "def extract_changes_from(record)\n self.changed_keys ||= record.changed\n self.filter_attributes(record)\n self.old_values ||= Hash.new\n self.new_values ||= Hash.new\n record.changed.each do |key|\n self.old_values[key] = record.changes[key.to_s].try(:first)\n self.new_values[key] = record.changes[key.to_s].try(:last)\n end\n self.changed_keys\n end", "def editable_attribute_names; super + additional_fields end", "def editable_attribute_names; super + additional_fields end", "def editable_attribute_names; super + additional_fields end", "def field_changes\n table_name = if table_name_changed\n prev_table_name\n else\n self.table_name\n end\n\n begin\n cols = table_columns\n old_colnames = cols.map(&:name) - standard_columns\n old_colnames = old_colnames.reject { |f| f.index(/^embedded_report_|^placeholder_/) }\n rescue StandardError\n return\n end\n\n fields = migration_fields_array\n new_colnames = fields.map(&:to_s) - standard_columns\n\n added = new_colnames - old_colnames\n removed = old_colnames - new_colnames\n changed = {}\n db_configs.each do |k, v|\n current_type = cols.find { |c| c.name == k.to_s }&.type\n next unless v[:type] && current_type\n\n expected_type = v[:type]&.to_sym\n current_type = :timestamp if current_type == :datetime\n changed[k.to_s] = expected_type if current_type != expected_type\n end\n\n if belongs_to_model\n belongs_to_model_id = \"#{belongs_to_model}_id\"\n removed -= [belongs_to_model_id]\n end\n\n [added, removed, changed, old_colnames]\n end", "def default_fields_for_forms\n [\n { name: :get_verbose_conflicting_row, width: 50,\n label: 
I18n.t('admin_import.conflicting_row')\n },\n { name: :import_text, flex: 1, label: I18n.t('admin_import.import_text') },\n\n { name: :name, width: 180, label: I18n.t('activerecord.attributes.team.name') },\n { name: :badge_number, width: 100, label: I18n.t('activerecord.attributes.team.zip') },\n\n { name: :data_import_city__name, width: 200,\n label: I18n.t('activerecord.models.data_import_city')\n },\n { name: :city__get_full_name, width: 200, label: I18n.t('activerecord.models.city')\n }\n ]\n end", "def field_options\n [\n ['Do Not Import Field', :none],\n ['Full Name', :full_name],\n ['First Name', :first_name],\n ['Last Name', :last_name],\n ['Email', :email],\n ['Type', :type],\n ['Company', :company],\n ['Street Address', :address1],\n ['City/State/Zipcode', :address2],\n ['City', :city],\n ['State', :state],\n ['ZipCode', :zip],\n ['Source', :source],\n ['Group', :category],\n ['Mobile Phone', :phone_mobile],\n ['Business Phone', :phone_business],\n ['Home Phone', :phone_home],\n ['Fax Number', :fax_number],\n ['Temperature', :temperature],\n ['Birthday', :birthday],\n ['Anniversary', :anniversary],\n ['Spouse', :spouse],\n ['Home Purchase Date', :home_anniversary],\n ['Home Budget Price', :budget],\n ['Notes/Comments', :description]\n ]\n end", "def fields\n form = @stamper.getAcroFields\n form.getFields.each_with_object({}) do |(name, value), fields|\n fields[name.to_sym] = form.getField(name)\n end\n end", "def edit # TODO: do I need to actually order it?\n\t\tall_questions = Question.select('short_name,text').all\n\t\t@question_names = \"[#{all_questions.collect { |q| \"\\\"#{q.short_name}\\\"\" }.compact.join(',')}]\"\n\t\t@question_texts = \"[#{all_questions.collect { |q| \"\\\"#{q.text}\\\"\" }.compact.join(',')}]\"\n\t\t\n\t\t@all_choices = \"[#{Choice.select('value').order('value ASC').all.collect { |c| \"\\\"#{c.value}\\\"\" }.compact.join(',')}]\" # construct list for typeahead\n\t\t_get_current_question_and_choices(params[:id])\n\tend", "def fields\n [*lookup]\n end", "def changes_for_draftsman(event)\n the_changes = {}\n ignore = self.class.draftsman_options[:ignore]\n skip = self.class.draftsman_options[:skip]\n only = self.class.draftsman_options[:only]\n draftable_attrs = self.attributes.keys - ignore - skip\n draftable_attrs = draftable_attrs & only if only.present?\n\n # If there's already an update draft, get its changes and reconcile them\n # manually.\n if event == :update\n # Collect all attributes' previous and new values.\n draftable_attrs.each do |attr|\n if self.draft? 
&& self.draft.changeset && self.draft.changeset.key?(attr)\n the_changes[attr] = [self.draft.changeset[attr].first, send(attr)]\n else\n the_changes[attr] = [self.send(\"#{attr}_was\"), send(attr)]\n end\n end\n # If there is no draft or it's for a create, then all draftable\n # attributes are the changes.\n else\n draftable_attrs.each { |attr| the_changes[attr] = [nil, send(attr)] }\n end\n\n # Purge attributes that haven't changed.\n the_changes.delete_if { |key, value| value.first == value.last }\n end", "def default_fields_for_forms\n [\n { name: :get_verbose_conflicting_row, width: 50,\n label: I18n.t('admin_import.conflicting_row')\n },\n { name: :import_text, flex: 1, label: I18n.t('admin_import.import_text') },\n\n { name: :name, width: 160, label: I18n.t('activerecord.attributes.city.name') },\n { name: :zip, width: 80, label: I18n.t('activerecord.attributes.city.zip') },\n { name: :area, width: 100, label: I18n.t('activerecord.attributes.city.area') },\n { name: :country, width: 100, label: I18n.t('activerecord.attributes.city.country') },\n { name: :country_code, width: 40, label: I18n.t('activerecord.attributes.city.country_code') }\n ]\n end", "def update_fields\n fieldlist = self.class.required_on_update + @newvalues.to_a - self.class.never_on_update\n @properties.select{|key, value| fieldlist.include?(key)}\n end", "def record_fields\n record_format.values_at(:key, :sys, :row).flatten.sort_by(&:position) if record_format\n end", "def values_on_edit(mode)\r\n @event_params ||= { id: @event&.id,\r\n title: @event&.title,\r\n open_date: @event&.open_date&.with_formats('date', 'time'),\r\n start_date: @event&.start_date&.with_formats('date', 'time'),\r\n category: @event&.category,\r\n information: @event&.information,\r\n official_url: @event&.official_url,\r\n publishing_status: @event&.publishing_status,\r\n place_id: @event&.place_id,\r\n flyers: @event&.flyers,\r\n original_event_id: @event&.original_event_id }\r\n @place_params ||= { title: @event.place&.title,\r\n address: @event.place&.address,\r\n official_url: @event.place&.official_url }\r\n @event_programs_params = {}\r\n @event.event_programs.each_with_index do |ev_program, program_idx|\r\n @event_performers_params = {}\r\n ev_program.event_performers.each_with_index do |ev_performer, performer_idx|\r\n @event_performers_params[:\"#{performer_idx}\"] = { id: ev_performer.id,\r\n performer_id: ev_performer.performer_id,\r\n full_name: ev_performer.performer.full_name,\r\n mode: mode }\r\n end\r\n @event_performers_params = { '0': { id: nil, performer_id: nil, full_name: nil, mode: mode } } if ev_program.event_performers.count == 0\r\n @event_programs_params[:\"#{program_idx}\"] = { id: ev_program.id,\r\n program_id: ev_program.program_id,\r\n title: ev_program.program.title,\r\n genre: ev_program.genre,\r\n event_performers: @event_performers_params,\r\n mode: mode }\r\n end\r\n if @event.event_programs.count == 0\r\n @event_performers_params = { '0': { id: nil, performer_id: nil, full_name: nil, mode: mode } }\r\n @event_programs_params = { '0': { id: nil, program_id: nil, title: nil, genre: nil, event_performers: @event_performers_params, mode: mode } }\r\n end\r\n\r\n @tickets_params = {}\r\n @event.tickets.each_with_index do |ticket, idx|\r\n @tickets_params[:\"#{idx}\"] = { id: ticket&.id,\r\n grade: ticket&.grade,\r\n price: ticket&.price,\r\n mode: mode }\r\n end\r\n @tickets_params = { '0': { id: nil, grade: nil, price: nil, mode: mode } } if @event.tickets.count == 0\r\n @error_msgs ||= {}\r\n end", "def 
keys\n @keys ||= fields.order(:fieldnum).select do |field|\n field.useedit & 1 == 1\n end.map do |field|\n field.fieldname.downcase\n end\n end", "def staff_request_fields\n staff_request_all_fields - %i[nonop_source justification review_comment created_at updated_at]\n end", "def hash\n [id, field_type, mode, label, no_wrap, bold, required, appears_by_default, find_enabled, allow_new_choices, sort_as_given, carry_choices, foreign_key, unique, does_data_copy, field_help, audited, num_lines, max_length, append_only, allow_html, has_extension, units, decimal_places, comma_start, number_format, does_average, does_total, blank_is_zero, currency_symbol, currency_format, display_time, display_relative, display_month, default_today, display_day_of_week, display_timezone, work_week, start_field, duration_field, format, hours24, display_user, default_kind, default_value_luid, choices_luid, choices, composite_fields, target_table_id, target_field_id, source_field_id, use_new_window, link_text, exact, display_images, default_domain, display_email, appears_as, abbreviate, auto_save, target_table_name, display_as_link, sort_alpha, version_mode, max_versions, see_versions, width, xml_tag, formula, default_value, comments, master_choice_table_id, master_choice_field_id, snap_field_id, parent_field_id, lookup_target_field_id, lookup_reference_field_id, summary_reference_field_id, summary_target_field_id, summary_function, master_table_tag, permissions].hash\n end", "def edit_field_terms\n [:identifier, :title, :maker, :resource_type, :genre_string,\n :description, :extent, :rights,\n :publisher, :date_original, :date_published, :subject,\n :language, :related_url,\n :medium, :place_of_interview, :place_of_manufacture,\n :place_of_publication, :provenance]\n end", "def diff(other)\n if engine != other.engine && @input_engine\n [:engine, engine, other.engine]\n elsif version != other.version\n [:version, version, other.version]\n elsif engine_version != other.engine_version && @input_engine\n [:engine_version, engine_version, other.engine_version]\n elsif patchlevel != other.patchlevel && @patchlevel\n [:patchlevel, patchlevel, other.patchlevel]\n end\n end", "def contractor_request_fields\n contractor_request_all_fields - %i[nonop_source justification review_comment created_at updated_at]\n end", "def input_date_range(name, from, to)\n field = field_content(name)\n id = field_id(name)\n {\n id: id,\n name: name,\n description: prop(field, 'description'),\n label: prop(field, 'label'),\n from: input_date_month_year(value_by_key(from, 'name'), value_by_key(from, 'date')),\n to: input_date_month_year(value_by_key(to, 'name'), value_by_key(to, 'date')),\n }\n end", "def changed\n self.class.fields.select do |field|\n field[0] != '_' and self._old_values[field] != self.send(field)\n end\n end", "def edit_users\n return self[:edit_users] unless persisted?\n (self[:edit_users] + (Wayfinder.for(self).ephemera_project&.edit_users || [])).uniq\n end", "def changes\n Change.all.sort! { |a, b| b.value <=> a.value }\n end", "def form_entry_param\n Array.new.tap do |arr|\n form.fields.map do |field|\n if field.field_type_range?\n arr << { field.id.to_s => [:from, :to] }\n\n elsif field.field_type_datetime?\n arr << { field.id.to_s => [:date, :hours, :minutes] }\n\n elsif field.field_type_address?\n arr << { field.id.to_s => [:address, :city, :postal_code, :country] }\n\n elsif field.field_type_question_group?\n # TO DO\n arr << { 'field.id' => [ 'row_10' ] }\n\n elsif field.field_type_checkbox? 
|| field.field_type_mcq?\n arr << { field.id.to_s => [] }\n\n elsif field.field_type_statement?\n # TO DO\n field.properties['statements'].each do |key, value|\n arr << field.id.to_s + \"_#{key}\"\n end\n\n elsif field.field_type_file?\n arr << field.id.to_s\n\n else\n arr << field.id.to_s\n end\n end\n end\n end", "def normalize_fields\n @fields = @fields.map do |field|\n case field\n when Symbol, String\n @properties[field]\n\n when Property, Operator\n field\n end\n end\n end", "def fields\n displayed_fields = @display_fields.select { |display_field| @document.key?(display_field) }\n pairs = displayed_fields.map do |display_field|\n [display_field, Array.wrap(@document.fetch(display_field))]\n end\n Hash[pairs]\n end", "def difference_fields(base_fields, remove_fields)\n fields(base_fields.to_a - remove_fields.to_a)\n end", "def revert_fields(*names)\n names = changed if names.empty?\n reverted = []\n names.each do |name|\n if self.respond_to? \"#{name}_change\"\n values = self.__send__ \"#{name}_change\"\n if values\n self.__send__ \"#{name}=\", values.first\n reverted << name\n end\n end\n end\n\n reverted\n end", "def from\n attributes.fetch(:from)\n end", "def fieldmap\n { \n 'MEMBERID' => 'id',\n 'EMAIL' => 'email',\n 'FNAME' => 'first_name',\n 'LNAME' => 'last_name',\n 'ADDRESS' => 'address',\n 'CITY' => 'city',\n 'STATE' => 'state',\n 'ZIP' => 'zip',\n 'BIRTHDATE' => 'birth_date',\n 'MSINCEDATE' => 'member_since_date',\n 'BILLDATE' => 'next_retry_bill_date',\n 'EXTERNALID' => 'external_id',\n 'GENDER' => 'gender',\n 'PHONE' => 'full_phone_number',\n 'CJOINDATE' => 'current_join_date'\n }\n end", "def simple_diff old_obj, new_obj, path_prefix, fields=nil\n fields ||= old_obj.keys.concat(new_obj.keys).uniq\n return nil unless fields.to_a.length > 0\n fields.map do |field|\n simple_diff_field(old_obj, new_obj, path_prefix, field)\n end.flatten.compact\n end", "def copy_translation_fields(source, destination)\n self.translated_columns.each do |col|\n destination.send(\"#{col.name}=\", source.send(col.name)) if source.has_attribute?(col.name)\n end\n end", "def local_order\n owner.field_prototypes.keys\n end", "def combine_address_fields\n %w[to cc bcc].map do |field|\n hash_addresses(@mail[field])\n end\n end", "def field_map\n {\n 'C' => 'C',\n 'D' => 'date',\n 'N' => 'N',\n 'P' => 'endpoint',\n 'T' => 'amount'\n }\n end", "def patched\n fields.each do |h|\n next if incoming[h].to_s.empty?\n\n # If we didn't have anything before, take the new version\n if existing[h].to_s.empty? || existing[h].to_s.casecmp('unknown').zero?\n existing[h] = incoming[h]\n next\n end\n\n # These are _expected_ to be different on a term-by-term basis\n next if %i[term group group_id area area_id].include? h\n\n # Can't do much yet with these ones\n next if %i[source given_name family_name].include? h\n\n # Accept multiple values for multi-lingual names\n if h.to_s.start_with? 'name__'\n existing[h] += ';' + incoming[h]\n next\n end\n\n # TODO: accept multiple values for :website, etc.\n next if %i[website].include? h\n\n # Accept values from multiple sources for given fields\n if %i[email twitter facebook image].include? h\n existing[h] = [existing[h], incoming[h]].join(';').split(';').map(&:strip).uniq(&:downcase).join(';')\n next\n end\n\n # If we have the same as before (case insensitively), that's OK\n # NB: the casecmp version on its own isn't sufficient in some cases (e.g. 
Turkey)\n next if (existing[h] == incoming[h]) || existing[h].casecmp(incoming[h].downcase).zero?\n\n # Accept more precise dates\n if h.to_s.include?('date')\n if incoming[h].include?(existing[h])\n existing[h] = incoming[h]\n next\n end\n # Ignore less precise dates\n next if existing[h].include?(incoming[h])\n end\n\n # Store alternate names for `other_names`\n if h == :name\n @new_headers << :alternate_names\n existing[:alternate_names] ||= nil\n existing[:alternate_names] = [existing[:alternate_names], incoming[:name]].compact.join(';')\n next\n end\n\n @warnings << \" ☁ Mismatch in #{h} for #{existing[:uuid]} (#{existing[h]}) vs #{incoming[h]} (for #{incoming[:id]})\"\n end\n\n existing\n end", "def movements_list_detailed(current_user)\n res={}\n movements.permitted_for_user(current_user).each do |m|\n if m.direction != 'Technical'\n if m.account\n res[m.id] = \"#{m.account.company.code_name} #{m.direction == 'Outcome' ? '-' : '+'}#{m.value.to_s} #{m.account.extended_info} [#{m.movement_group.name}] (#{m.comment})\"\n end\n end\n end\n res\n end", "def form_field_order\n %w{\n\n }\n end", "def form_field_order\n %w{\n\n }\n end", "def changed_columns\n cc = super\n cc = cc.dup if frozen?\n deserialized_values.each{|c, v| cc << c if !cc.include?(c) && original_deserialized_value(c) != v} \n cc\n end", "def get_ordering_fields\n return self.class.ordering_fields&.map(&:to_s) || self.get_fields\n end", "def filter_fields\n @fields_by_range = FIELDS\n @fields_by_index = @fields_by_range.each_with_index.inject({}) do |col,((k,v),i)|\n col[k] = i\n col\n end\n return @fields_by_range,@fields_by_index\n end", "def fields\n if frozen?\n Array(@gapi.fields).map { |f| Field.from_gapi(f).freeze }.freeze\n else\n Array(@gapi.fields).map { |f| Field.from_gapi f }\n end\n end", "def enriched_fields(entity)\n original_fields = entity.fields\n additional_fields = additional_fields_for(entity)\n original_fields.merge(additional_fields)\n end", "def edits\n segments.collect do |segment|\n Edit.new(segment)\n end\n end", "def build_user_emails_for_edit\n @edit[:user_emails] = {}\n to_email = @edit[:new][:email][:to] || []\n users_in_current_groups = User.with_groups(User.current_user.miq_groups).distinct.sort_by { |u| u.name.downcase }\n users_in_current_groups.each do |u|\n next if u.email.blank?\n next if to_email.include?(u.email)\n\n @edit[:user_emails][u.email] = \"#{u.name} (#{u.email})\"\n end\n end", "def merge_details_replace_field(target, victim, selections, values)\n selections.each do |path_fix|\n subrec_name = path_fix[0]\n # this is the index of the order the user arranged the subrecs in the form, not the order of the subrecords in the DB.\n ind = path_fix[1]\n field = path_fix[2]\n position = path_fix[3]\n\n subrec_index = find_subrec_index_in_victim(victim, subrec_name, position)\n\n target[subrec_name][ind][field] = victim[subrec_name][subrec_index][field]\n end\n end", "def compute_current(fields, project, current_proposal)\n result = {}\n fields.each do |f|\n if update_value?(project, f, current_proposal[f])\n result[f] = current_proposal[f][:value]\n elsif project.attribute_present?(f)\n result[f] = project[f]\n end\n end\n result\n end", "def potential_values\n @original_document[value_field]\n end", "def form_field_order\n %w{\n\n }\n end", "def telecom_fields\n \"\".tap do |result|\n fields_for(:telecom) do |f|\n result << f.text_field(:home_phone)\n result << f.text_field(:work_phone)\n result << f.text_field(:mobile_phone)\n result << f.text_field(:vacation_home_phone)\n 
result << f.text_field(:email)\n result << f.text_field(:url)\n end\n end\n end", "def index_fields\n fields - %i[nonop_source justification review_comment created_at updated_at]\n end", "def index_fields\n fields - %i[nonop_source justification review_comment created_at updated_at]\n end", "def index_fields\n fields - %i[nonop_source justification review_comment created_at updated_at]\n end", "def user_column_fields\n %i(id first_name last_name email created updated).freeze\n end", "def model_fields\n ActiveSupport::OrderedHash.new.tap do |hash|\n properties.map do |u|\n hash[u.name] = TYPES.fetch properties.find{|property| property.name == u.name}.primitive\n end\n end\n end", "def modified_data_fields\n [:gender, :hispanic_latino, :veteran_status, :disability_status]\n end", "def get_field_deserializers()\n return super.merge({\n \"lowerAddress\" => lambda {|n| @lower_address = n.get_string_value() },\n \"upperAddress\" => lambda {|n| @upper_address = n.get_string_value() },\n })\n end", "def getValidAndInvalidAttributes\n @valid_attributes = []\n\n @valid_attributes_from_db.each_with_index do |attr_from_db, i| \n if @excel_attributes.include? attr_from_db[:name]\n @valid_attributes << attr_from_db\n end\n end\n\n x_attr = []\n @valid_attributes.each {|x| x_attr << x[:name]}\n @invalid_attributes = @excel_attributes - x_attr\n end", "def id_fields\n until @field_pos.keys.length == @fields.length\n # iterate through each field range\n @f_ranges.each_with_index do |f_r, f_ind|\n possibles = []\n # see if positional values for unknown fall within the field range\n @positional_values.each_with_index do |p_v, p_ind|\n next if @field_pos.values.include?(p_ind)\n\n possibles.push(p_ind) if in_range?(p_v, f_r)\n end\n # assign field to position if it is the only possible combination\n @field_pos[@fields[f_ind]] = possibles[0] if possibles.length == 1\n end\n end\n end", "def field_names\n (text_fields + html_fields + atom_fields + datetime_fields +\n number_fields + geo_fields).uniq\n end", "def edition_options\n editions= case I18n.locale \n when 'de', :de : ['de-de', 'at-de', 'ch-de', 'int-en', 'in-en']\n else [ 'de-de', 'at-de', 'ch-de', 'int-en', 'in-en'] end\n editions.collect!{ |e| [ I18n.t( \"prefs.edition.#{e.split('-').first}\"), e ] }\n end", "def register_attributes\n [:from , :to]\n end", "def conditions_to_fields(conditions); end", "def history_fields_mapping fields\n history_fields_mapping = fields.with_indifferent_access\n\n define_method(:history_fields_mapping) do\n history_fields_mapping\n end\n end", "def build_editable_fields(actions, guardian, args)\n end", "def updatable_attributes\n [:name]\n end" ]
[ "0.5473017", "0.5410282", "0.52394956", "0.52394956", "0.52394956", "0.51582205", "0.5071715", "0.50582904", "0.50582904", "0.50511974", "0.49796072", "0.49509707", "0.49368137", "0.493614", "0.4902217", "0.4898648", "0.4893831", "0.48906907", "0.48715574", "0.48627362", "0.4855078", "0.4838739", "0.482034", "0.48179775", "0.47997737", "0.47758", "0.47608253", "0.47539085", "0.47486117", "0.4730918", "0.4729649", "0.47282708", "0.47265318", "0.47265318", "0.47265318", "0.47255647", "0.47035772", "0.46727276", "0.46720642", "0.46716204", "0.46562165", "0.4651132", "0.46476373", "0.46473795", "0.46464527", "0.46322325", "0.4631154", "0.46294", "0.46235293", "0.46067327", "0.46043912", "0.46018392", "0.45989466", "0.45878094", "0.45866463", "0.45803842", "0.45706555", "0.45556614", "0.4549916", "0.45460182", "0.4539138", "0.45351836", "0.4533307", "0.4514442", "0.45123056", "0.45088667", "0.4506282", "0.44964942", "0.44755468", "0.4474061", "0.44678047", "0.44678047", "0.44676766", "0.4458409", "0.44578972", "0.44541752", "0.44343758", "0.44289088", "0.44243002", "0.44227287", "0.44175872", "0.44165105", "0.44154674", "0.44108018", "0.44105023", "0.44105023", "0.4409613", "0.44023544", "0.43906507", "0.43899718", "0.4388373", "0.43882176", "0.43868273", "0.43864286", "0.43812835", "0.43727913", "0.43707517", "0.43698955", "0.4360573", "0.4359882" ]
0.80623287
0
Returns a hash that can be used in assign_attributes or update_attributes.
def assignable_fields names = fields.map{|f| f.fetch "name" } to_values = fields.map{|f| f.fetch "to" } Hash[names.zip(to_values)] end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_hash\n self.class.attributes.inject({}) { |memo, name| memo[name] = send(name); memo }\n end", "def to_hash\n @attributes\n end", "def to_hash\n attributes\n end", "def to_hash\n attributes.dup\n end", "def to_hash\n @attributes\n end", "def attributes_hash(object); end", "def hash\n attributes.hash\n end", "def hash\n attributes.hash\n end", "def hash\n attributes.hash\n end", "def to_hash\n _attributes_.inject({}) { |h, (k,v)| h[k] = eval_as_proc(v); h }\n end", "def to_hash\n @attributes\n end", "def to_hash\n {}.tap do |hash|\n @original_attrs.keys.each do |at|\n hash[at] = send(at)\n end\n end\n end", "def attributes_hash\n self.info.to_hash.symbolize_keys\n end", "def to_hash\n r = ATTR_NAMES.inject({}) { |m,e|\n m[e] = self.send(e)\n m\n }\n r[:product_identity_id] = product_identity_id\n r[:langcode] = lang_code_3_chars\n r[:mappings] = mappings\n r\n end", "def hash\n @attrs\n end", "def to_hash\n fattrs.inject({}){|h,a| h.update a => send(a)}\n end", "def to_hash\n Hash[self.class.attributes.map {|attr| [attr, send(attr)]}]\n end", "def to_hash\n ret = HashWithIndifferentAccess.new\n ret.merge!(self.attributes_for_hash)\n ret.merge!(self.associations_for_hash)\n ret.merge!(self.add_on_data_for_hash)\n ret.merge!(self.association_foreign_keys_for_hash)\n ret\n end", "def hash\n [name, type, sub_attributes, multi_valued, description, required, canonical_values, case_exact, mutability, returned, uniqueness, reference_types].hash\n end", "def to_hash\n hash = {}; self.attributes.each { |k,v| hash[k] = v }\n end", "def to_h\n Utils.deep_dup @attributes\n end", "def hash\n self.class.hash ^ key_attributes.hash\n end", "def to_h\n self.class.list_of_attributes.each_with_object({}) do |key, hash|\n val = instance_variable_get :\"@#{key}\"\n hash[key] = Utils.hashify(val) unless val == Dry::Initializer::UNDEFINED\n end\n end", "def to_hash\n Attributes.disable_nest_hash(attrs)\n end", "def to_hash\n attribute_hash = {}\n simple_attributes.each do |att|\n attribute_hash[att] = self.send(att) unless self.send(att).nil?\n end\n \n attribute_hash\n end", "def field_hash\n\n self.yattributes || fields.inject({}) { |r, f| r[f.fkey] = f.value; r }\n end", "def to_hash\n hash = {}\n\n instance_variables.each do |var|\n key = var.to_s.delete(\"@\").to_sym\n val = instance_variable_get(var)\n\n if authlete_model_simple_attribute?(key) or val.nil?\n hash[key] = val\n elsif TAGGED_VALUE_ARRAY_ATTRIBUTES.include?(key)\n hash[key] = val.map { |element| element.to_hash }\n elsif key == :extension\n # For attributes such as :extension\n hash[key] = val.to_hash\n end\n end\n\n hash\n end", "def to_hash\n hash = {}\n self.attributes.each {|attr| hash[attr.to_sym] = self.send(attr)}\n hash\n end", "def attr_hash\n hash = create_hash\n hash.delete(\"id\")\n hash.delete(\"errors\")\n return hash\n end", "def to_hash\n {}.tap do |hash|\n _attributes.each do |key, value|\n if key == :data\n hash.merge!(value)\n else\n hash[key] = value.respond_to?(:to_hash) ? 
value.to_hash : value\n end\n end\n end\n end", "def serializable_hash options=nil\n hash = super\n eav_attributes_list.each do |attribute|\n hash[attribute] = self.send(attribute)\n end\n\n hash\n end", "def attributes\n full_attributes.to_hash(self)\n end", "def to_hash\n doodle.attributes.inject({}) {|hash, (name, attribute)| hash[name] = send(name); hash}\n end", "def to_hash\n self.class.attribute_names.inject({}) do |hash, key|\n hash[key] = self.send(key); hash\n end\n end", "def attributes\n (@original_attributes||{}).merge(@attributes).keys.inject({}) do |hash, key|\n hash[key] = read_attribute(key)\n hash\n end\n end", "def attrs_hash(attrs)\n HashWithIndifferentAccess.new(attrs)\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n if value.nil?\n is_nullable = self.class.fastly_nullable.include?(attr)\n next if !is_nullable || (is_nullable && !instance_variable_defined?(:\"@#{attr}\"))\n end\n\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n if value.nil?\n is_nullable = self.class.fastly_nullable.include?(attr)\n next if !is_nullable || (is_nullable && !instance_variable_defined?(:\"@#{attr}\"))\n end\n\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n if value.nil?\n is_nullable = self.class.fastly_nullable.include?(attr)\n next if !is_nullable || (is_nullable && !instance_variable_defined?(:\"@#{attr}\"))\n end\n\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n if value.nil?\n is_nullable = self.class.fastly_nullable.include?(attr)\n next if !is_nullable || (is_nullable && !instance_variable_defined?(:\"@#{attr}\"))\n end\n\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n Hash[@attributes.merge(@updated_attributes).map { |k, v| [k, v.is_a?(Entity) ? 
v.to_hash : v] }]\n end", "def hash\n ([self.class] + self.class.comparison_attrs.map{|x| send(x)}).hash\n end", "def to_hash\n hash = super\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n if value.nil?\n is_nullable = self.class.openapi_nullable.include?(attr)\n next if !is_nullable || (is_nullable && !instance_variable_defined?(:\"@#{attr}\"))\n end\n\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = super\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n if value.nil?\n is_nullable = self.class.openapi_nullable.include?(attr)\n next if !is_nullable || (is_nullable && !instance_variable_defined?(:\"@#{attr}\"))\n end\n\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n args_hash = {}\n arguments.each { |k,v| args_hash[k] = v.attributes } if arguments\n { return_type: return_type }.merge args_hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n r = ATTR_NAMES.inject({}) { |m,e|\n m[e] = self.send(e)\n m\n }\n r[:files] = mappings_for_files.map { |e| e.to_hash }\n r\n end", "def to_hash\r\n hash = {}\r\n self.class.attribute_map.each_pair do |attr, param|\r\n value = self.send(attr)\r\n next if value.nil?\r\n hash[param] = _to_hash(value)\r\n end\r\n hash\r\n end", "def hashify_attributes(attrs)\n Hash.new.tap{ |h| attrs.each{|a| h[a] = self.send(a)} }\n end", "def hash\n \"#{self.class.name}-#{self.id}-#{@__metadata__.cas}-#{@__attributes__.hash}\".hash\n end", "def to_h\n @attributes.dup\n end", "def attributes_hash\n attributes_hash = section.attributes.to_hash\n attributes_hash.each { |k, attr| attributes_hash[k] = attr.value }\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n ::ActiveSupport::HashWithIndifferentAccess.new(hash)\nend", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n ::ActiveSupport::HashWithIndifferentAccess.new(hash)\nend", "def attr_hash(attrs)\n h = {}\n attrs.each {|k,v| h[k] = v}\n h\n end", "def attributes\n @_attributes ||= Hash.new.with_indifferent_access\n end", "def to_hash\n hash = attributes.to_hash\n hash['location'] = location.first.to_hash if location.size == 1\n hash\n end", "def attributes_hash\n fail 'sub class to implement'\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do 
|attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n 
self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end", "def to_hash\n hash = {}\n self.class.attribute_map.each_pair do |attr, param|\n value = self.send(attr)\n next if value.nil?\n hash[param] = _to_hash(value)\n end\n hash\n end" ]
[ "0.7831759", "0.7761908", "0.77386785", "0.77325606", "0.7714387", "0.7684327", "0.7655504", "0.7655504", "0.7655504", "0.76483", "0.7636641", "0.7520305", "0.75115657", "0.74820614", "0.74315304", "0.7401671", "0.7361326", "0.73356074", "0.7331077", "0.73300683", "0.72988635", "0.728544", "0.7210123", "0.71881473", "0.71747386", "0.716151", "0.71469253", "0.71385163", "0.7102058", "0.7078695", "0.7072677", "0.7055519", "0.70541745", "0.70454", "0.7032502", "0.7008713", "0.6987209", "0.6987209", "0.6987209", "0.6987209", "0.69853014", "0.69845915", "0.69569546", "0.69569546", "0.69506675", "0.69380283", "0.6930218", "0.69204044", "0.6919177", "0.69072264", "0.6907211", "0.69052905", "0.6903758", "0.6903758", "0.6893613", "0.6887691", "0.68860394", "0.68844056", "0.687106", "0.687106", "0.687106", "0.687106", "0.687106", "0.687106", "0.687106", "0.68585366", "0.6858471", "0.6857236", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647", "0.68568647" ]
0.0
-1
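The negatives block that closes above is dominated by copies of the same generated `to_hash` helper, the attribute-map serializer emitted by Swagger/OpenAPI Ruby clients. As a reference for that pattern, here is a minimal self-contained sketch; the class name `ApiModel`, its attributes, and the recursive `_to_hash` helper are illustrative assumptions, not taken from any record in this dump.

```ruby
# Minimal sketch of the attribute-map serialization pattern repeated in the
# negatives above. ApiModel and its fields are hypothetical examples.
class ApiModel
  # Maps Ruby attribute names to the JSON keys they serialize to.
  def self.attribute_map
    { id: 'id', display_name: 'displayName' }
  end

  attr_accessor :id, :display_name

  def initialize(id: nil, display_name: nil)
    @id = id
    @display_name = display_name
  end

  # Walks the attribute map and skips nil values, exactly as in the snippets.
  def to_hash
    hash = {}
    self.class.attribute_map.each_pair do |attr, param|
      value = send(attr)
      next if value.nil?
      hash[param] = _to_hash(value)
    end
    hash
  end

  private

  # Recursively serializes nested arrays, hashes, and model objects.
  def _to_hash(value)
    if value.is_a?(Array)
      value.map { |v| _to_hash(v) }
    elsif value.is_a?(Hash)
      value.transform_values { |v| _to_hash(v) }
    elsif value.respond_to?(:to_hash)
      value.to_hash
    else
      value
    end
  end
end

puts ApiModel.new(id: 1).to_hash.inspect # => {"id"=>1}; nil display_name is omitted
```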
Cancancan GET /products GET /products.json
def index
  if user_signed_in? and current_user.admin == true
    @products = Product.where(nil) # creates an anonymous scope
    @products = Product.joins(:cultivation).where(cultivations: { id: params[:cultivation] }) if params[:cultivation].present?
    @products = @products.joins(:cycle).where(cycles: {id: params[:cycle]}) if params[:cycle].present?
  else
    @products = Product.ativos.where(nil) # creates an anonymous scope
    @products = Product.ativos.joins(:cultivation).where(cultivations: { id: params[:cultivation] }) if params[:cultivation].present?
    @products = @products.joins(:cycle).where(cycles: {id: params[:cycle]}) if params[:cycle].present?
  end
  ##@products = @products.cycle(params[:cycle]) if params[:cycle].present?
  ##if params[:cultivation]
  ##@products = Product.para_o_cultivo(params[:cultivation])
  ##else
  ##@products = Product.all
  ##end
  ##@cycles = Cycle.all
  #category = Category.where(:name => params[:name]).first
  #@posts = category.posts
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def products\n request :public, :get, :products\n end", "def user_products\n @products = current_user.products\n\n respond_to do |format|\n format.html\n format.json { render json: @products }\n end\n end", "def index\n if is_my_resource(params[:prossumer_id])\n @products = Product.where(prossumer_id: params[:prossumer_id]).as_json({\n cycle_id: params[:cycle_id],\n include: {\n prossumer: {\n except: [:encrypted_password, :salt, :confirm_hash]\n },\n product_category: {}\n }\n })\n render json: @products\n end\n end", "def show\n # is_my_resource(params[:id])\n\n # prossumerProductsIds = Prossumer.find(params[:id]).products.ids\n render json: ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first.as_json(:include => :product)\n end", "def index\n @products = Product.all.page(params[:page]).per(10)\n authorize Product\n end", "def index\n @products = @user.products\n # was @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n limit = params[:limit]&.to_i || 10\n page = params[:page]&.to_i || 0\n if params[:available] == \"1\"\n @products = Product.paginate(page, limit).available\n else\n @products = Product.paginate(page, limit)\n end\n render json: @products\n end", "def show\n render json: @product\n end", "def index\n b_admin = current_user.admin? rescue false\n @products = Product.filter_by_params(b_admin, params)\n #@products = Product.available\n \n @title = Product.page_description(params)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def show\n @products = Product.find_by_id(params[:id])\n msg = { status: 200 , product: @products }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end", "def show\n render json: @product, status: 200\n end", "def show\n render json: @product\n end", "def all\n @products = Product.get_list_active_products.page(params[:page]).per(10)\n if @products.present?\n @products\n else\n @object = 'product'\n render \"api/v1/errors/404\", status: 401\n end\n end", "def show\n @product = @person.products.find(params[:id])\n\n respond_to do |format|\n format.json { render :json => @product }\n end\n end", "def get_products()\n\tputs \"Getting products\"\n\tresponse = request_get(\"/api/product\")\n\tputs response.body\nend", "def show\n @product = Product.find(params[:id])\n json_response(params)\n end", "def index\n @products = Product.all\n render json: @products\n end", "def show\n @product = @user.products.find(params[:id])\n # was @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n render json: @product\n end", "def show\n @items = cart.items\n @products = @items.collect(&:product)\n \n respond_to do |format|\n format.html { render layout: false } \n format.json { render json: @items }\n end\n end", "def index\n @products = current_user.products\n end", "def show\n result = Product.find(params[:id])\n render json: {\n status: :ok,\n product: result\n } \n end", "def index\n @products = @co.products\n end", "def show\n product = Product.find_by_id(params[:id])\n\n render json: product\n end", "def show\n @product = Product.find(params[:id])\n render json: @product, status: :ok\n end", "def show\n if is_my_resource(@product.prossumer_id)\n render json: @product.as_json({\n 
cycle_id: params[:cycle_id],\n include: {\n prossumer: {\n except: [:encrypted_password, :salt, :confirm_hash]\n },\n product_category: {}\n }\n })\n end\n end", "def products\n end", "def show \n user = current_user.user_info\n product = user.products.find(params[:id]) if !user.admin\n product = Product.find(params[:id]) if user.admin \n # respond_to do |format|\n # format.html # show.html.erb\n # format.json { render json: @product }\n # end\n p = ShopifyAPI::Product.find product.shopify_id\n redirect_to 'http://shopulse.myshopify.com/products/'+p.handle\n end", "def get_authorization_products_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: AuthorizationApi.get_authorization_products ...\"\n end\n \n # resource path\n local_var_path = \"/api/v2/authorization/products\".sub('{format}','json')\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # HTTP header 'Accept' (if needed)\n local_header_accept = ['application/json']\n local_header_accept_result = @api_client.select_header_accept(local_header_accept) and header_params['Accept'] = local_header_accept_result\n\n # HTTP header 'Content-Type'\n local_header_content_type = ['application/json']\n header_params['Content-Type'] = @api_client.select_header_content_type(local_header_content_type)\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n \n auth_names = ['PureCloud OAuth']\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'OrganizationProductEntityListing')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: AuthorizationApi#get_authorization_products\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def getKind\n @products = Product.where(\"kind = ?\", params[:kind]).available.PriceOrder.paginate(page: params[:page], per_page: 5)\n render json: @products\n end", "def product(name)\n get(\"/apiproducts/#{name}\")\n end", "def index\n \tproducts = Product.all\n \trender json: products\n \tend", "def show\n @product = Product.find(params[:id])\n\n render json: @product\n end", "def show\n render json: @product_management\n end", "def products\n Product.all\n end", "def index\n @products = Product.all\n msg = { status: 200 , product: @products }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end", "def obtains_product\n product = Product.find(params[:product_id])\n @product = product.user_id == @current_user.id ? 
product : nil\n (render(json: { e: 'AUTH' }, status: :unauthorized) && nil) if @product.nil?\n end", "def index\n begin\n @products = Product.all\n render json: @products, status: 200\n rescue => exception\n render json: { errors: exception }\n end\n end", "def show\n @products = Product.where({contry_id: params[:id]}).paginate(page: params[:page], per_page: 30)\n\n end", "def show\n @category = Category.find(params[:id])\n @search = @category.products.search(params[:q])\n @products = @search.result.page(params[:page]).per(current_user.list_page_size)\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @category }\n end\n end", "def show\n @products = Product.find(params[:id])\n end", "def index\n @products = current_user.products.all\n end", "def products\n run(:get,\"/school_products\", [200])\n end", "def show\n render :json => Producto.find(params[:id])\n end", "def show_cart\n render json: User.find(params[:id]).cart_products\n end", "def show\n @products = CatalogMebeli.find(params[:id]).products\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @product }\n end\n end", "def index\n @products = Product.all\n respond_to do |format|\n format.html\n format.json { render :json => @product }\n end\n end", "def index\n if current_user.admin?\n @products = Product.all\n else\n if current_user.private?\n @products = Product.where(owner_id: current_user.private_id)\n elsif current_user.business?\n @products = Product.where(owner_id: current_user.business_id)\n end\n\n end\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def new\n @product = Product.new\n authorize! :create, @product\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product }\n end\n end", "def show\n json_response(@api_v1_product)\n end", "def show\n #@product = Product.find(params[:id])\n #authorize! :show, @product\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @product }\n end\n end", "def index\n @products = Product.all\n render json: {is_success: true, error_code: 200, message: \"Products Found Successfully\", result: @products}, status: 200\n end", "def show\n @breadcrumb = 'read'\n @product_type = ProductType.find(params[:id])\n @products = @product_type.products.paginate(:page => params[:page], :per_page => per_page).order('product_code')\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product_type }\n end\n end", "def show\n @product = Product.find(params[:id])\n @donor = Donor.all\n @event = Event.all\n @category = Category.all\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = ProductProduct.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def new\n\n authorize! 
:manage, @product, :message => 'Not authorized as an administrator'\n @product = Product.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def product\n request = Request.where(user_id: [params[:user_id]])\n render :json => request.as_json\n end", "def show\n authorize_action_for @property, at: current_store\n\n respond_to do |format|\n format.json { render json: @property.product_properties, status: 200 }\n format.html\n end\n end", "def get_accessible_products\n return call('Product.get_accessible_products')\n end", "def show\n respond_to do |format|\n format.html\n format.json { render :json => @product }\n end\n end", "def show\n respond_to do |format|\n format.html {render \"products/show\"}\n format.json {\n product = Product.where(id: params[:id]).includes(:category).first\n render json: product.as_json(include: [:category, :photo])\n }\n end\n end", "def index\n if params[:product_id]\n @promotions = Product.find(params[:product_id]).promotions\n else\n @promotions = Promotion.all\n end\n\n render json: @promotions\n end", "def index\n @member = Member.find(params[:member_id])\n @products = @member.products\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def show\n @product = Product.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def index\n @products = Product.order('item_code ASC').page(params[:page]).per(25)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n render :json => Producto.all\n end", "def show\n @product = Product.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def index\n @products = current_company.products.order('created_at desc').page(params[:page]).per(20)\n render json: @products, meta: {total_pages: @products.total_pages, total_count: @products.total_count}\n end", "def view_product\n to_json(\n only: [:id, :title, :description, :key_information],\n methods: [:photo_url, :net_mrp, :mrp_per_unit, :quantity],\n :include => {\n store: {\n only: [:name, :id],\n methods: [:full_address]\n }\n }\n )\n end", "def index\n @cart_products = if current_user.cart.present?\n 
current_user.cart.carts_products.includes(:product).all\n else\n []\n end\n render formats: :json\n end", "def get_products_by_category\n json_response({ message: 'NOT IMPLEMENTED' })\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @inventories }\n end\n end", "def index\n @api_v1_products = Product.all\n json_response(@api_v1_products)\n end", "def index\n @product_managements = ProductManagement.all\n\n render json: @product_managements\n end", "def index\n @carts_products = @cart.carts_products.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @carts_products }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { 
render json: @product }\n end\n end" ]
[ "0.75524724", "0.72646224", "0.72050476", "0.71545655", "0.70793116", "0.6971261", "0.6809366", "0.6802797", "0.6795268", "0.67786586", "0.6776278", "0.67725056", "0.6721342", "0.67189926", "0.6694022", "0.66917676", "0.668584", "0.66723", "0.6613482", "0.66031337", "0.6602001", "0.65866065", "0.6585286", "0.6580732", "0.65753126", "0.65683967", "0.6551566", "0.6536206", "0.65309983", "0.65173364", "0.6502953", "0.650168", "0.65008515", "0.6496625", "0.6490214", "0.6470846", "0.64656997", "0.6463603", "0.64513904", "0.644391", "0.64438486", "0.64420485", "0.64415056", "0.6419717", "0.6413171", "0.6412052", "0.64102167", "0.64027244", "0.6396619", "0.63954663", "0.63947135", "0.6393005", "0.63924676", "0.63860935", "0.6385169", "0.6368423", "0.63598317", "0.6359783", "0.6359783", "0.6359783", "0.63594556", "0.6359362", "0.63494354", "0.6347739", "0.6329899", "0.6327898", "0.6327582", "0.632744", "0.632744", "0.632744", "0.632744", "0.63175976", "0.63173157", "0.63169694", "0.63158965", "0.63150793", "0.62991047", "0.62988484", "0.62957394", "0.6295617", "0.6281557", "0.6276742", "0.6271658", "0.6269694", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164", "0.62691164" ]
0.0
-1
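The record above pairs the query "Cancancan GET /products" with an index action that branches on an admin flag and chains optional filters onto an anonymous scope. A hedged sketch of that pattern using CanCanCan's real `authorize!` API follows; the `ativos` scope and the `cultivation`/`cycle` associations come from the record's own code, while the controller framing around them is an assumption.

```ruby
# Sketch of the record's index pattern with CanCanCan authorization.
# Assumes a Product model exposing the `ativos` scope and the
# cultivation/cycle associations used in the dataset entry.
class ProductsController < ApplicationController
  def index
    # CanCanCan: raises CanCan::AccessDenied unless the ability allows it.
    authorize! :read, Product

    # Admins see everything; other users only the active ("ativos") records.
    base = current_user&.admin? ? Product.all : Product.ativos

    # Chain each filter onto the relation only when its parameter is present.
    if params[:cultivation].present?
      base = base.joins(:cultivation).where(cultivations: { id: params[:cultivation] })
    end
    if params[:cycle].present?
      base = base.joins(:cycle).where(cycles: { id: params[:cycle] })
    end

    @products = base

    respond_to do |format|
      format.html
      format.json { render json: @products }
    end
  end
end
```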
GET /products/1 GET /products/1.json
def show
  @benefits = ProductsPurposesRelation.where(product_id: @product.id)
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def product(name)\n get(\"/apiproducts/#{name}\")\n end", "def show\n product = Product.find_by_id(params[:id])\n\n render json: product\n end", "def show\n @product = Product.find(params[:id])\n\n render json: @product\n end", "def index\n @api_v1_products = Product.all\n json_response(@api_v1_products)\n end", "def show\n @product = Product.find(params[:id])\n json_response(params)\n end", "def get_products()\n\tputs \"Getting products\"\n\tresponse = request_get(\"/api/product\")\n\tputs response.body\nend", "def show\n @product = Product.find(params[:id])\n render json: @product, status: :ok\n end", "def show\n result = Product.find(params[:id])\n render json: {\n status: :ok,\n product: result\n } \n end", "def show\n @products = Product.find_by_id(params[:id])\n msg = { status: 200 , product: @products }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end", "def index\n @products = Product.all\n render json: @products\n end", "def show\n render json: @product, status: 200\n end", "def show\n @product = Product.find(params[:id])\n\n render json: @product\n end", "def show\n json_response(@api_v1_product)\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def show\n render :json => Producto.find(params[:id])\n end", "def index\n begin\n @products = Product.all\n render json: @products, status: 200\n rescue => exception\n render json: { errors: exception }\n end\n end", "def show\n @product = ProductProduct.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n render json: @product\n end", "def index\n \tproducts = Product.all\n \trender json: products\n \tend", "def products\n request :public, :get, :products\n end", "def show\n @product = Product.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @products }\n end\n end", "def show\n render json: @product\n end", "def show\n @product = Product.find(params[:id])\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do 
|format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def index\n @products = Product.all\n respond_to do |format|\n format.html\n format.json { render :json => @product }\n end\n end", "def show\n @product = @person.products.find(params[:id])\n\n respond_to do |format|\n format.json { render :json => @product 
}\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @product }\n end\n end", "def show\n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json=> @product }\n end\n end", "def index\n @products = Product.all\n msg = { status: 200 , product: @products }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @inventories }\n end\n end", "def index\n @products = @user.products\n # was @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n limit = params[:limit]&.to_i || 10\n page = params[:page]&.to_i || 0\n if params[:available] == \"1\"\n @products = Product.paginate(page, limit).available\n else\n @products = Product.paginate(page, limit)\n end\n render json: @products\n end", "def index\n render :json => Producto.all\n end", "def show\n respond_to do |format|\n format.html\n format.json { render :json => @product }\n end\n end", "def show\n puts \"the params are #{params}\" \n @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def getKind\n @products = Product.where(\"kind = ?\", params[:kind]).available.PriceOrder.paginate(page: params[:page], per_page: 5)\n render json: @products\n end", "def show\n @product_item = ProductItem.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @product_item }\n end\n end", "def index\n @food_products = FoodProduct.all\n render json: @food_products\n end", "def index\n #@products = Product.all\n @products = Product.paginate( :page => params[:page],\n :per_page => 40\n )\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @ordered_products = OrderedProduct.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @ordered_products }\n end\n end", "def index\n products = Product.all\n render json: {message: \"ok\", data: products}\n puts \"hello\"\n\n end", "def user_products\n @products = current_user.products\n\n respond_to do |format|\n format.html\n format.json { render json: @products }\n end\n end", "def show\n @shopifyproduct = Shopifyproduct.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @shopifyproduct }\n end\n end", "def show\n @product = @user.products.find(params[:id])\n # was @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def show\n @ordered_product = OrderedProduct.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @ordered_product }\n end\n end", "def show\n @products = Product.find(params[:id])\n end", "def index\n 
@products = Product.order('item_code ASC').page(params[:page]).per(25)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n if is_my_resource(params[:prossumer_id])\n @products = Product.where(prossumer_id: params[:prossumer_id]).as_json({\n cycle_id: params[:cycle_id],\n include: {\n prossumer: {\n except: [:encrypted_password, :salt, :confirm_hash]\n },\n product_category: {}\n }\n })\n render json: @products\n end\n end", "def get_product\n json_response({ message: 'NOT IMPLEMENTED' })\n end", "def show\n @onecompany_product = Onecompany::Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @onecompany_product }\n end\n end", "def index\n @tipo_products = TipoProduct.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @tipo_products }\n end\n end", "def index\n begin\n \n products = Product.all\n render json: {\n success: true,\n totalRecords: products.length,\n data: (ActiveModel::ArraySerializer.new(products, each_serializer: ProductSerializer))\n }, status: 200\n rescue ActiveRecord::RecordNotFound => e\n render json: {\n success: false,\n errors: e.message\n }, status: 404\n rescue Exception => e\n render json: {\n success: false,\n errors: e.message\n }, status: 500\n end\n end", "def index\n @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @products }\n format.json { render :json => @products }\n end\n end", "def index\n @products = Product.all\n render json: {is_success: true, error_code: 200, message: \"Products Found Successfully\", result: @products}, status: 200\n end", "def show\n render json: @product_management\n end", "def show\n @product2 = Product2.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product2 }\n end\n end", "def index\n #@products = Product.all\n @products = Product.order('created_at ASC').page(params[:page]).per(12)\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @product_images = ProductImage.where(product_uuid: params[:product_id])\n render json: @product_images, status: 200\n end", "def show\n @prod = Prod.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @prod }\n end\n end", "def set_api_v1_product\n begin\n @api_v1_product = Product.find(params[:id])\n rescue => ex\n json_response({error: ex.message}, :not_found)\n end\n end", "def getProduct( product_id)\n params = Hash.new\n params['product_id'] = product_id\n return doCurl(\"get\",\"/product\",params)\n end", "def show\n respond_to do |format|\n format.html {render \"products/show\"}\n format.json {\n product = Product.where(id: params[:id]).includes(:category).first\n render json: product.as_json(include: [:category, :photo])\n }\n end\n end", "def show\n @items = cart.items\n @products = @items.collect(&:product)\n \n respond_to do |format|\n format.html { render layout: false } \n format.json { render json: @items }\n end\n end", "def show\n @star_product = Star::Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @star_product }\n end\n end", "def show\n render json: @food_product\n end", "def show\n @producto = Producto.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render 
json: @producto }\n end\n end", "def show\n # is_my_resource(params[:id])\n\n # prossumerProductsIds = Prossumer.find(params[:id]).products.ids\n render json: ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first.as_json(:include => :product)\n end", "def show\n @home_searches_product = Home::Searches::Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @home_searches_product }\n end\n end", "def show\n @magento_product = MagentoProduct.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @magento_product }\n end\n end", "def show\n @product_price = ProductPrice.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product_price }\n end\n end", "def show\n @prod = Prod.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render :json => @prod }\n end\n end" ]
[ "0.77224106", "0.76329553", "0.76313764", "0.7607208", "0.75760156", "0.7552171", "0.7506385", "0.7484625", "0.745622", "0.74501616", "0.74376804", "0.7421124", "0.7362056", "0.7318765", "0.73185545", "0.73185545", "0.73185545", "0.7316062", "0.7311976", "0.73088664", "0.72941", "0.729187", "0.7291441", "0.7276286", "0.7272662", "0.7272662", "0.7272662", "0.7272662", "0.72700304", "0.7255538", "0.7233478", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.72326964", "0.7232629", "0.7211553", "0.7191871", "0.71728355", "0.71728355", "0.71728355", "0.717033", "0.71582985", "0.7140302", "0.70848453", "0.7072449", "0.70673674", "0.706432", "0.7038531", "0.7004358", "0.69840044", "0.69693065", "0.6968406", "0.6964786", "0.6959802", "0.695388", "0.6943222", "0.69415545", "0.6939184", "0.6928706", "0.69022006", "0.6888529", "0.68479115", "0.6837828", "0.683746", "0.6822597", "0.6821544", "0.6817213", "0.68145454", "0.6811117", "0.67925274", "0.678835", "0.6788051", "0.67867345", "0.67776203", "0.6769408", "0.67637855", "0.6759666", "0.6751121", "0.67510307", "0.6748587", "0.6746096", "0.6745283", "0.67408913", "0.6733844" ]
0.0
-1
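This record's query names the conventional `GET /products/1` route, and nearly every negative above is a variation of the same scaffolded show action. For reference, a minimal sketch of that convention is below; the `set_product` before_action is the standard Rails idiom and an assumption here, not part of the record itself.

```ruby
# Conventional handler for GET /products/1 and GET /products/1.json.
class ProductsController < ApplicationController
  before_action :set_product, only: :show

  def show
    respond_to do |format|
      format.html # renders app/views/products/show.html.erb
      format.json { render json: @product }
    end
  end

  private

  # Product.find raises ActiveRecord::RecordNotFound, which Rails maps to 404.
  def set_product
    @product = Product.find(params[:id])
  end
end
```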
POST /products POST /products.json
def create
  @product = Product.new(product_params)
  #permitted_columns = params[:products_purposes_relations].permit(:product_id, :purpose_id, :stars)
  # @products_purposes_relation = @product.products_purposes_relations.create(permitted_columns)
  respond_to do |format|
    if @product.save
      format.html { redirect_to @product, notice: t('create_success') }
      format.json { render :show, status: :created, location: @product }
    else
      format.html { render :new }
      format.json { render json: @product.errors, status: :unprocessable_entity }
    end
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create\n @product = Product.new(product_args)\n\n if @product.save\n render json: Product.all, status: :created\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n if params[:products]\n params[:products].each do |product|\n @product = Product.new(name: product[:name],\n brand: product[:brand],\n model: product[:model],\n sku: product[:sku],\n price: product[:price],\n desc: product[:desc])\n if [email protected]\n render json: @product.errors.full_messages, status: 422\n end\n end\n render 'api/products/index'\n else\n @product = Product.new(product_params)\n if @product.save\n render 'api/products/show'\n else\n render json: @product.errors.full_messages, status: 422\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n if @product.save\n render json: @product, status: :created, location: @product\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n @product = @collection.products.build(product_params)\n\n if @product.save\n render json: @product, status: :created#, location: @collection\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n @product = Product.new(product_params)\n\n if @product.save\n render json: @product, status: :created\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n @product = Product.create!(product_params)\n json_response(@product, :created)\n end", "def create\n newProduct = Product.new(products_params)\n if newProduct.save\n msg = { status: 201 , product: newProduct }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n else\n msg = { status: 422 }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n if @product.save\n render json: @product, status: :created, location: @product\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n\n product_details = params.permit(:title, :inventory_count, :price)\n success = Product.create(product_details)\n\n render json: { success: success }\n end", "def create\n @product = Product.new(product_params)\n\n if @product.save\n render json: @product, status: :created\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n product = Product.new(product_params)\n if product.save\n render json: ProductSerializer.new(product).serialized_json\n end\n end", "def create\n @product = Product.new(product_params)\n if @product.save\n render json: {id: @product.id}\n else\n render json: {msg: @product.errors.full_messages.first}, status: :unprocessable_entity\n end\n end", "def create\n @product = Product.create(product_params)\n if @product.save\n # Respond to html with a redirect and json\n respond_to do |format|\n format.html do\n flash[:notice] = 'Product added'\n redirect_to products_path\n end\n format.json do\n render json: product.to_json\n end\n end\n else\n # Respond to html with a redirect and json\n respond_to do |format|\n format.html do\n flash.now[:error] = 'Error adding product'\n render :new\n end\n format.json do\n render json: { errors: @product.errors.full_messages }, status: 422\n end\n end\n end\n end", "def create\n @product = Product.new(product_params)\n @product.user = current_api_v1_user\n respond_to do |format|\n if @product.save\n 
params[:product][:properties].try(:each) do |k,v|\n @product.product_properties.create(property: Property.find(k), value: v)\n end\n params[:product][:colors].try(:each) do |c|\n @product.colors.create(name: c[:name].downcase, code: c[:code])\n end\n params[:product][:photos].try(:each) do |c|\n @product.photos.create(image: c)\n end\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created}\n else\n format.html { render :new }\n format.json { render json: @product.errors.full_messages, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product,\n :notice=> 'Product was successfully created.' }\n format.json { render :json=> @product, :status=> :created,\n :location=> @product }\n else\n format.html { render :action=> \"new\" }\n format.json { render :json=> @product.errors,\n :status=> :unprocessable_entity }\n end\n end\n end", "def create\n if @product.save\n render :show, status: :created, location: api_v1_product_path(@product)\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, :notice => 'Product was successfully created.' }\n format.json { render :json => @product, :status => :created, :location => @product }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, :notice => 'Product was successfully created.' }\n format.json { render :json => @product, :status => :created, :location => @product }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, :notice => 'Product was successfully created.' }\n format.json { render :json => @product, :status => :created, :location => @product }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to products_path, notice: 'Product was successfully created.' 
}\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: t(:product_created) }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n return unless product_params\n render json: Product.create_product(\n @product_params,\n category_list,\n @current_user.id\n ).simple_info, status: :created\n rescue => e\n render json: { error: e }, status: :bad_request\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to products_url, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: products_url }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n respond_to do |format|\n if @product.save\n current_user.user_info.products.push @product\n Shopify.create @product\n format.html { redirect_to :action => 'index' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n \n end", "def send_product(product)\n request(product, \"product\", :post, {method: \"add\"})\n end", "def create\n @product = Product.new(product_params)\n \n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to products_url, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n @product.save\n set_products\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to products_url, notice: \"Product was successfully created.\" }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n ActiveRecord::Base.transaction do\n begin\n @api_v1_product = Product.create!(api_v1_product_params)\n\n json_response @api_v1_product, :created\n rescue => ex\n json_response({error: ex.message}, :unprocessable_entity)\n raise ActiveRecord::Rollback\n end\n end\n end", "def create\n\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product,\n notice: 'Product was successfully created.' }\n format.json { render :show, status: :created,\n location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors,\n status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: \"Product was successfully created.\" }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def addProd()\n if(!authenticateAdmin(params[:admin_id], params[:admin_auth_key]))\n render json: {status: false, reason: \"Authentication Failed\", data: \"\"}\n return\n end\n p = Product.new(name: params[:name], price: params[:price].to_f, category_id: params[:cat_id], picture_list: '[]')\n status = p.save\n error = \"\"\n if(p.errors.full_messages.count > 0)\n error = c.errors.full_messages[0]\n end\n render json: {status: status, reason: error, data: \"\"}\n end", "def create\n @product = Product.new(params[:product])\n\n if @product.save\n \n respond_with(@product, :location=>products_url)\n else\n respond_with(@product, :head=>:bad_request)\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render json: @product }\n format.js { render status: :ok }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n format.js { render status: :unprocessable_entity }\n end\n end\n end", "def create\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create(product)\n validate_type!(product)\n\n attributes = sanitize(product)\n _, _, root = @client.post(\"/products\", attributes)\n\n Product.new(root[:data])\n end", "def create\n # @order = Order.new() \n total = 0\n \n @order = Order.new()\n for product in params[:_json]\n \n if (product[:quantity].nil? || product[:quantity].to_f < 1 || !isint(product[:quantity]))\n # Handle case when order invalid quantity\n render json: \"\", status: :bad_request\n return\n end\n\n @product = Product.find_by_name_and_size_id(product[:product], product[:size]) \n if @product.nil?\n # Handle case when order invalid products\n render json: \"\", status: :not_found\n return\n end \n total = total + @product.price * product[:quantity].to_f \n @order.orders_products << OrdersProduct.new(:product => @product, :hot => product[:hot], :quantity => product[:quantity]) \n end \n\n @order.total = total\n\n if @order.save\n render json: @order, status: :created, location: @order\n else\n render json: @order.errors, status: :unprocessable_entity\n end\n end", "def create\n @product = Product.new(params[:product])\n @product.shop = Shop.find_by_uuid params[:shop_id]\n\n respond_to do |format|\n if @product.save!\n format.html { redirect_to shop_products_path(@product.shop.uuid), notice: 'Product was successfully created.' }\n format.json { render json: @product.to_json(:include => {:product_variants => {:include => [:option_types,:pictures]}})}\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def new\n @product = Product.new\n\n render json: @product\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Продукт успешно создан.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @admin_product = Admin::Product.new(admin_product_params)\n\n respond_to do |format|\n if @admin_product.save\n format.html { redirect_to admin_products_url }\n format.json { render :show, status: :created, location: @admin_product }\n else\n format.html { render :new }\n format.json { render json: @admin_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: t('.message') }\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to admin_products_url, notice: 'Product was successfully created.' }\n format.json { render json: @product, status: :created, location: @product }\n else # save fails due to validation error -> show errors\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = @person.products.build(params[:model])\n\n respond_to do |format|\n if @product.save\n format.json { render :json => @product, :status => :created}\n else\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Изделие успешно создано.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n respond_to do |format|\n if @product.save\n format.html {redirect_to @product, notice: 'Producto creado.'}\n format.json {render :show, status: :created, location: @product}\n\n else\n format.html {render :new}\n format.json {render json: @product.errors, status: :unprocessable_entity}\n end\n\n end\n end", "def create\n @add_product = AddProduct.new(params[:add_product])\n\n respond_to do |format|\n if @add_product.save\n format.html { redirect_to @add_product, notice: 'Add product was successfully created.' }\n format.json { render json: @add_product, status: :created, location: @add_product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @add_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(params[:product].merge :user_id => current_user.id)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to root_url, notice: 'Product was successfully created.' 
}\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_product\n @user = User.find_by(id: params['user'])\n products = params['products'].split(',')\n products.each do |product_id|\n @user.cart.products << Product.find_by(id: product_id)\n end \n render json: {data: @user.cart.products }\n end", "def create\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render action: 'show', status: :created, location: @product }\n else\n format.html { render action: 'new' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end" ]
[ "0.7674954", "0.7589692", "0.756074", "0.7531862", "0.7531213", "0.7507928", "0.7420413", "0.7391407", "0.7374718", "0.7355908", "0.73231804", "0.72869605", "0.7144144", "0.7050259", "0.7047559", "0.70415026", "0.7037288", "0.7037288", "0.7037288", "0.70322204", "0.70255643", "0.6992655", "0.69766444", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.69545984", "0.6945448", "0.6941314", "0.69375795", "0.69284433", "0.6927765", "0.6926225", "0.6924957", "0.68977845", "0.68903667", "0.6888921", "0.6884899", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.6881634", "0.68810374", "0.6880909", "0.6875918", "0.68446535", "0.68419653", "0.6827102", "0.6825376", "0.68205744", "0.6810749", "0.6810749", "0.6810749", "0.6810749", "0.6801166", "0.67984277", "0.67804384", "0.67599255", "0.6741211", "0.6736925", "0.6734706", "0.67305225", "0.6714831", "0.6688717", "0.66845196", "0.66810054", "0.6675035", "0.6662443", "0.66591614" ]
0.0
-1
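Note: the create actions collected above all share one dual-format pattern — save, then respond with HTML or JSON using 201/422. A minimal client-side sketch of exercising the JSON variant, assuming a Rails server at http://localhost:3000, a POST /products.json route, and illustrative payload fields (name, price) that may not match the real product_params:

require "net/http"
require "json"
require "uri"

# Hypothetical host/port and payload; adjust to the actual app.
uri = URI("http://localhost:3000/products.json")
payload = { product: { name: "Widget", price: 9.99 } }.to_json

response = Net::HTTP.post(uri, payload, "Content-Type" => "application/json")

case response
when Net::HTTPCreated   # 201 -- the render :show, status: :created branch
  puts JSON.parse(response.body)
else                    # 422 -- @product.errors rendered as JSON
  warn "Create failed: #{response.code} #{response.body}"
end

(A request like this assumes CSRF protection is relaxed for JSON, e.g. an API-mode controller.)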
PATCH/PUT /products/1
PATCH/PUT /products/1.json
def update
  respond_to do |format|
    if @product.update(product_params)
      format.html { redirect_to @product, notice: t('update_success') }
      format.json { render :show, status: :ok, location: @product }
    else
      format.html { render :edit }
      format.json { render json: @product.errors, status: :unprocessable_entity }
    end
  end
end
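A minimal sketch of calling this update action over its JSON route, assuming a server at http://localhost:3000 and product id 1; the patched attributes are illustrative, not taken from the snippets:

require "net/http"
require "json"
require "uri"

uri = URI("http://localhost:3000/products/1.json")  # hypothetical host and id

request = Net::HTTP::Patch.new(uri, "Content-Type" => "application/json")
request.body = { product: { price: 12.5 } }.to_json  # illustrative attributes

response = Net::HTTP.start(uri.hostname, uri.port) { |http| http.request(request) }

case response
when Net::HTTPOK   # 200 -- render :show, status: :ok
  puts JSON.parse(response.body)
else               # 422 -- validation errors as JSON
  warn "Update failed: #{response.code} #{response.body}"
end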
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update\n begin\n @api_v1_product.update!(api_v1_product_params)\n head :no_content\n rescue => ex\n json_response({error: ex.message}, :unprocessable_entity)\n end\n end", "def update\n if @product.update(product_params)\n render json: @product, status: :ok#, location: @collection\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n updateProduct = Product.find_by_id(params[:id])\n updateProduct.update(products_params)\n if updateProduct != nil\n msg = { status: 200 , product: updateProduct }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n else\n msg = { status: 422 }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end\n end", "def update\n return unless product_params\n render json: @product.simple_info, status: :ok if @product.update!(@product_params)\n rescue => e\n render json: { error: e }, status: :ok\n end", "def update\n @product = Product.find(params[:id])\n\n if @product.update(product_params)\n head :no_content\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n if @product.update(product_params)\n render json: @product\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n @product.assign_attributes object_params.reject{|_, v| v.blank?}\n # In a normal app we have a pre filled form of the object to update,\n # so when we do a PATCH (or PUT) we send all the attributes again,\n # in the API we permit to send any field to update, so we need to remove\n # all the blank params of the object to prevent validations triggers of\n # attributes that we don't send to update\n if @product.save\n render json: @product.to_json\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def patch\n headers = {\"If-Match\" => @version}\n response = @context.request :patch, \"#{@path}/#{@id}\", @data.to_json, headers\n @version += 1\n response\n # 'X-HTTP-Method-Override' => 'PATCH'\n end", "def update\n product = Product.find(params[:id])\n product_details = params.permit(:title, :inventory_count, :price)\n\n product.update(product_details)\n\n render json: product\n end", "def update\n \n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @record = Product.find(params[:id])\n @record.update_attributes(params[:product])\n \n respond_to do |format|\n format.json {\n render json: {}\n }\n end\n end", "def update\n respond_to do |format|\n if @product.update!(product_params)\n format.html { redirect_to products_url, notice: 'Product was successfully updated.' 
}\n format.json { render json: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: t(:product_updated) }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n \n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, :notice => 'Product was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, :notice => 'Product was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to products_path, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, :notice => 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, :notice => 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product,\n :notice=> 'Product was successfully updated.' 
}\n format.json { head :ok }\n else\n format.html { render :action=> \"edit\" }\n format.json { render :json=> @product.errors,\n :status=> :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n if @product.update_attributes(params[:product])\n respond_to do |format|\n format.html { redirect_to products_path, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n end\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\t\trespond_to do |format|\n\t\t if @product.update_attributes(params[:product])\n\t\t\tif @product.photo.nil?\n\t\t\t\tphoto = Photo.find_by_product_id(@product.id)\n\t\t\t\[email protected]_attributes(:photo_id => photo.id) if !photo.nil?\n\t\t\tend\n\t\t\tformat.html { redirect_to @product, :notice => 'Успешно обновлено' }\n\t\t\tformat.json { head :no_content }\n\t\t else\n\t\t\tformat.html { render :action => \"edit\" }\n\t\t\tformat.json { render :json => @product.errors, :status => :unprocessable_entity }\n\t\t end\n\t\tend\n end", "def update\n @product = Product.find(params[:id])\n @product.name_prefix = @product.name.first.upcase\n respond_to do |format|\n if @product.update_attributes(params[:product])\n\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to edit_product_path(@product), notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product.update(product_params)\n set_products\n end", "def update\n if @product.update(product_params)\n render :show, status: :ok, location: api_v1_product_path(@product)\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n respond_to do |format|\n @product.edit\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to action: 'show', notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def editProd()\n if(!authenticateAdmin(params[:admin_id], params[:admin_auth_key]))\n render json: {status: false, reason: \"Authentication Failed\", data: \"\"}\n return\n end\n p = Product.find(params[:id])\n status = p.update(name: params[:name], price: params[:price].to_f, category_id: params[:cat_id])\n error = \"\"\n if(p.errors.full_messages.count > 0)\n error = c.errors.full_messages[0]\n end\n render json: {status: status, reason: error, data: \"\"}\n end", "def update\n @product = Product.eager_loading.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to shop_products_path(@product.shop.uuid), notice: 'Product was successfully updated.' }\n format.json { render json: @product.to_json(:include => {:product_variants => {:include => [:option_types,:pictures]}})}\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n #Find product by productID\n @product = Product.find_by(productID: params[:id])\n \n respond_to do |format|\n if @product.update_attributes(product_params)\n format.html { redirect_to products_path, notice: 'Product has been updated.' }\n format.json { render :index, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @angular = Product.find(params[:id])\n \n @angular.update_attributes(title: params[:products][:title], description: params[:products][:description])\n respond_to do |format|\n if @angular.valid?\n format.html { redirect_to store_index_path, notice: 'Product was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @angular.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n if @product\n if @product.update(price: params[:price])\n render 'api/products/show'\n else\n render json: [\"Can only update price\"], status: 422\n end\n else\n render json: [\"Product not found\"], status: 422\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: t('.message') }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.xml { head :ok }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.xml { render :xml => @product.errors, :status => :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n \n end", "def set_api_v1_product\n begin\n @api_v1_product = Product.find(params[:id])\n rescue => ex\n json_response({error: ex.message}, :not_found)\n end\n end", "def update # PATCH\n raise NotImplementedError\n end", "def update\n respond_to do |format|\n if @product1.update(product1_params)\n format.html { redirect_to @product1, notice: \"Product1 was successfully updated.\" }\n format.json { render :show, status: :ok, location: @product1 }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @product1.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok}\n else\n format.html { render :edit }\n format.json { render json: @product.errors.full_messages, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Your product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product_spec.update(product_spec_params)\n format.html { redirect_to @product_spec, notice: 'Product spec was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @product_spec }\n else\n format.html { render :edit }\n format.json { render json: @product_spec.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: \"Product was successfully updated.\" }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def patch!\n request! :patch\n end", "def update\n @product = @person.products.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:model])\n flash[:notice] = 'Product was successfully updated.'\n format.json { render :json=>nil }\n else\n format.json { render :json => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end" ]
[ "0.7269931", "0.6935652", "0.68690825", "0.6846676", "0.68126076", "0.67678404", "0.6749974", "0.6741848", "0.67151767", "0.6700884", "0.6686023", "0.66597176", "0.6654553", "0.66536564", "0.664067", "0.664067", "0.66382414", "0.6631012", "0.6631012", "0.6627257", "0.6620688", "0.6603794", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6603763", "0.6602732", "0.65937763", "0.6593595", "0.6587338", "0.65602255", "0.6557081", "0.6557081", "0.6557081", "0.6557081", "0.6557081", "0.65562665", "0.6540051", "0.65274733", "0.6498262", "0.6490972", "0.6485979", "0.6477706", "0.6475468", "0.6473973", "0.6454957", "0.6453377", "0.6441549", "0.643574", "0.64351535", "0.642971", "0.64243215", "0.6415268", "0.64094204", "0.64077", "0.6406827", "0.6405912", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632", "0.6403632" ]
0.6420181
65
DELETE /products/1
DELETE /products/1.json
def destroy
  @product.destroy
  respond_to do |format|
    format.html { redirect_to products_url, notice: t('destroy_success') }
    format.json { head :no_content }
  end
end
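A matching sketch for the JSON destroy route, under the same assumptions (local server, product id 1); the action above answers with head :no_content, so a 204 with an empty body is the success signal:

require "net/http"
require "uri"

uri = URI("http://localhost:3000/products/1.json")  # hypothetical host and id

request = Net::HTTP::Delete.new(uri)
response = Net::HTTP.start(uri.hostname, uri.port) { |http| http.request(request) }

if response.is_a?(Net::HTTPNoContent)  # 204 from head :no_content
  puts "Deleted"
else
  warn "Destroy failed: #{response.code} #{response.body}"
end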
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy\n product = Product.find(params[:id])\n product.destroy\n\n render json: { deleted: params[:id] }\n end", "def delete_product(name)\n delete(\"/apiproducts/#{name}\")\n end", "def destroy\n p @product.destroy!\n render json: { result: 'deleted' }, status: :ok\n end", "def destroy\n @product.destroy\n render json: {}\n end", "def destroy\n @product.destroy\n\n render json: @product, status: :ok#, location: @collection\n end", "def delete_product(id)\n @client.raw('delete', \"/ecommerce/products/#{id}\")\n end", "def destroy\n product = Product.find(params[:id])\n product.destroy\n render json: {id: product.id}\n end", "def destroy\n @product = Product.find(params[:id])\n @product.delete!\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def delete(options = nil)\n request = Request.new(@client)\n path = \"/products/\" + CGI.escape(@id) + \"\"\n data = {\n\n }\n\n response = Response.new(request.delete(path, data, options))\n return_values = Array.new\n \n return_values.push(response.success)\n\n \n return_values[0]\n end", "def destroy\n unread\n\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n begin\n @api_v1_product.destroy!\n head :no_content\n rescue => ex\n json_response({error: ex.message}, :unprocessable_entity)\n end\n end", "def destroy\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :ok }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :ok }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :ok }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n 
format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = 
Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to root_url }\n format.json { head :no_content }\n end\n end", "def destroy\n puts(\"you are in destroy \")\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n render json: {is_success: true, error_code: 200, message: \"Deleted Successfully\", result: @product}, status: 200\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to root_url(:item => 3), notice: 'Product was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @record = Product.find(params[:id])\n @record.trash\n respond_to do |format|\n format.json { head :no_content }\n end\n end", "def deleteProd()\n if(!authenticateAdmin(params[:admin_id], params[:admin_auth_key]))\n render json: {status: false, reason: \"Authentication Failed\", data: \"\"}\n return\n end\n p = Product.find(params[:id])\n status = p.destroy\n error = \"\"\n if(p.errors.full_messages.count > 0)\n error = c.errors.full_messages[0]\n end\n render json: {status: true, reason: error, data: \"\"}\n end", "def destroy\n @product1.destroy\n respond_to do |format|\n format.html { redirect_to product1s_url, notice: \"Product1 was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to admin_products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @onecompany_product = Onecompany::Product.find(params[:id])\n @onecompany_product.destroy\n\n respond_to do |format|\n format.html { redirect_to onecompany_products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @item.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: t('.message') }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to admin_products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to admin_products_url }\n format.json { head :no_content }\n end\n end", "def delete\n render json: Item.delete(params[\"id\"])\n end", "def destroy\n @producto = Producto.find(params[:id])\n @producto.destroy\n\n respond_to do |format|\n format.html { redirect_to productos_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @producto = Producto.find(params[:id])\n @producto.destroy\n\n respond_to do |format|\n format.html { redirect_to productos_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @producto = Producto.find(params[:id])\n @producto.destroy\n\n respond_to do |format|\n format.html { redirect_to productos_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully deleted.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\nend", "def destroy \n @product = current_user.user_info.products.find(params[:id])\n Shopify.delete @product\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id]).delete\n respond_to do |format|\n if @product.delete\n format.html {redirect_to @product, notice: \"Product was successfully deleted.\" }\n else\n format.json {render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Your product was successfully deleted.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @shopifyproduct = Shopifyproduct.find(params[:id])\n @shopifyproduct.destroy\n\n respond_to do |format|\n format.html { redirect_to shopifyproducts_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = @person.products.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.json { render :json=>true }\n end\n end", "def destroy\n #@product = Product.find(params[:id]) #하단에서 미리 선언\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n respond_to do |format|\n format.html { redirect_to(products_url) }\n format.xml { head :ok }\n format.json { head :ok }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n\n head :no_content\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to root_path, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @admin_product.destroy\n respond_to do |format|\n format.html { redirect_to admin_products_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Продукт успешно удален.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product = Product.find(params[:id])\n @product.destroy\n\n head :no_content\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: \"Product was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: \"Product was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: \"Product was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @prod = Prod.find(params[:id])\n @prod.destroy\n\n respond_to do |format|\n format.html { redirect_to prods_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @product.destroy\n respond_to do |format|\n format.html { redirect_to products_url, notice: 'Product was successfully destroyed.' }\n format.json { head :no_content }\n end\n end" ]
[ "0.7716745", "0.7592962", "0.7548045", "0.7501879", "0.75009406", "0.7474183", "0.743948", "0.74176705", "0.74075043", "0.7369892", "0.7352832", "0.7347189", "0.73354656", "0.73354656", "0.73354656", "0.73301345", "0.73301345", "0.73301345", "0.73301345", "0.73301345", "0.73301345", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.73128235", "0.7312593", "0.72985035", "0.72789305", "0.7265428", "0.7264302", "0.7258882", "0.72525805", "0.72503394", "0.72286373", "0.7225069", "0.72190267", "0.72032017", "0.7202079", "0.7202079", "0.72007", "0.71877134", "0.71877134", "0.71877134", "0.7185265", "0.7183546", "0.7170181", "0.71685195", "0.7156565", "0.7151217", "0.71456236", "0.7142008", "0.7135612", "0.712974", "0.7127146", "0.7124946", "0.7124206", "0.71106845", "0.7109525", "0.70784783", "0.70784783", "0.70784783", "0.70779014", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581", "0.7075581" ]
0.72143084
59
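A minimal sketch of the destroy-action pattern the negatives above vary on (hypothetical ProductsController; assumes a Product model and a JSON-only client):

class ProductsController < ApplicationController
  # DELETE /products/1.json -- remove the record and answer 204 No Content
  def destroy
    @product = Product.find(params[:id])
    @product.destroy
    head :no_content
  end
end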
Use callbacks to share common setup or constraints between actions.
def set_product
  @product = Product.find(params[:id])
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_required_actions\n # TODO: check what fields change to asign required fields\n end", "def action_hook; end", "def run_actions; end", "def define_action_hook; end", "def actions; end", "def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end", "def add_actions; end", "def callbacks; end", "def callbacks; end", "def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end", "def define_action_helpers; end", "def post_setup\n end", "def action_methods; end", "def action_methods; end", "def action_methods; end", "def before_setup; end", "def action_run\n end", "def execute(setup)\n @action.call(setup)\n end", "def define_action_helpers?; end", "def set_actions\n actions :all\n end", "def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end", "def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end", "def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end", "def before_actions(*logic)\n self.before_actions = logic\n end", "def setup_handler\n end", "def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end", "def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? 
}\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end", "def action; end", "def action; end", "def action; end", "def action; end", "def action; end", "def workflow\n end", "def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end", "def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end", "def before(action)\n invoke_callbacks *self.class.send(action).before\n end", "def process_action(...)\n send_action(...)\n end", "def before_dispatch(env); end", "def after_actions(*logic)\n self.after_actions = logic\n end", "def setup\n # override and do something appropriate\n end", "def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end", "def setup(_context)\n end", "def setup(resources) ; end", "def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end", "def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end", "def determine_valid_action\n\n end", "def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end", "def startcompany(action)\n @done = true\n action.setup\n end", "def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end", "def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end", "def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end", "def define_tasks\n define_weave_task\n connect_common_tasks\n end", "def setup(&block)\n define_method(:setup, &block)\n end", "def setup\n transition_to(:setup)\n end", "def setup\n transition_to(:setup)\n end", "def action\n end", "def setup( *args 
)\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend", "def config(action, *args); end", "def setup\n @setup_proc.call(self) if @setup_proc\n end", "def before_action \n end", "def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end", "def action\n end", "def matt_custom_action_begin(label); end", "def setup\n # override this if needed\n end", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end", "def after(action)\n invoke_callbacks *options_for(action).after\n end", "def pre_task\n end", "def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end", "def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end", "def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end", "def setup_signals; end", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! 
: action\n end", "def initialize(*args)\n super\n @action = :set\nend", "def after_set_callback; end", "def setup\n #implement in subclass;\n end", "def lookup_action; end", "def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end", "def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end", "def release_actions; end", "def around_hooks; end", "def save_action; end", "def setup(easy)\n super\n easy.customrequest = @verb\n end", "def action_target()\n \n end", "def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end", "def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end", "def before_setup\n # do nothing by default\n end", "def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end", "def default_action; end", "def setup(&blk)\n @setup_block = blk\n end", "def callback_phase\n super\n end", "def advice\n end", "def _handle_action_missing(*args); end", "def duas1(action)\n action.call\n action.call\nend", "def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end", "def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end", "def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend" ]
[ "0.6163163", "0.6045976", "0.5946146", "0.591683", "0.5890051", "0.58349305", "0.5776858", "0.5703237", "0.5703237", "0.5652805", "0.5621621", "0.54210985", "0.5411113", "0.5411113", "0.5411113", "0.5391541", "0.53794575", "0.5357573", "0.53402257", "0.53394014", "0.53321576", "0.53124547", "0.529654", "0.5296262", "0.52952296", "0.52600986", "0.52442724", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.5232394", "0.523231", "0.5227454", "0.52226824", "0.52201617", "0.5212327", "0.52079266", "0.52050185", "0.51754695", "0.51726824", "0.51710224", "0.5166172", "0.5159343", "0.51578903", "0.51522785", "0.5152022", "0.51518047", "0.51456624", "0.51398855", "0.5133759", "0.5112076", "0.5111866", "0.5111866", "0.5110294", "0.5106169", "0.509231", "0.50873137", "0.5081088", "0.508059", "0.50677156", "0.50562143", "0.5050554", "0.50474834", "0.50474834", "0.5036181", "0.5026331", "0.5022976", "0.5015441", "0.50121695", "0.5000944", "0.5000019", "0.4996878", "0.4989888", "0.4989888", "0.49864885", "0.49797225", "0.49785787", "0.4976161", "0.49683493", "0.4965126", "0.4958034", "0.49559742", "0.4954353", "0.49535993", "0.4952725", "0.49467874", "0.49423352", "0.49325448", "0.49282882", "0.49269363", "0.49269104", "0.49252945", "0.4923091", "0.49194667", "0.49174926", "0.49173003", "0.49171105", "0.4915879", "0.49155936" ]
0.0
-1
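A minimal sketch of how a setter callback like set_product is typically wired up (hypothetical ProductsController; before_action runs the shared setup once for every listed action):

class ProductsController < ApplicationController
  # Share the record lookup instead of repeating it in each action
  before_action :set_product, only: [:show, :edit, :update, :destroy]

  def show
    # @product is already loaded by the callback
  end

  private

  def set_product
    @product = Product.find(params[:id])
  end
end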
Never trust parameters from the scary internet, only allow the white list through.
def product_params
  params.require(:product).permit(:name, :soil, :utilization, :active, :photo, :description, :cycle_id, :purpose_ids => [], :products_purposes_relation_ids => [], :cultivation_ids => [])
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_params\n params.require(:user).permit(param_whitelist)\n end", "def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end", "def allow_params_authentication!; end", "def allowed_params\n ALLOWED_PARAMS\n end", "def default_param_whitelist\n [\"mode\"]\n end", "def param_whitelist\n [:role, :title]\n end", "def expected_permitted_parameter_names; end", "def safe_params\n params.except(:host, :port, :protocol).permit!\n end", "def strong_params\n params.require(:team_member).permit(param_whitelist)\n end", "def permitir_parametros\n \t\tparams.permit!\n \tend", "def strong_params\n params.require(:community).permit(param_whitelist)\n end", "def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end", "def strong_params\n params.require(:education).permit(param_whitelist)\n end", "def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end", "def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end", "def param_whitelist\n [:rating, :review]\n end", "def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end", "def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end", "def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end", "def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end", "def valid_params_request?; end", "def strong_params\n params.require(:experience).permit(param_whitelist)\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? 
key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end", "def allowed_params\n params.require(:allowed).permit(:email)\n end", "def permitted_params\n []\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end", "def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend", "def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end", "def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end", "def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end", "def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end", "def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end", "def safe_params\n params.require(:user).permit(:name)\n end", "def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend", "def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end", "def check_params; true; end", "def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end", "def quote_params\n params.permit!\n end", "def valid_params?; end", "def paramunold_params\n params.require(:paramunold).permit!\n end", "def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend", "def filtered_parameters; end", "def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end", "def filtering_params\n params.permit(:email, :name)\n end", "def check_params\n true\n end", "def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end", "def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def allowed_params\n 
params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend", "def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend", "def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end", "def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end", "def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end", "def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend", "def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end", "def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end", "def active_code_params\n params[:active_code].permit\n end", "def filtering_params\n params.permit(:email)\n end", "def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end", "def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end", "def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end", "def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end", "def post_params\n if current_user.admin? 
\n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end", "def list_params\n params.permit(:name)\n end", "def filter_parameters; end", "def filter_parameters; end", "def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end", "def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end", "def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end", "def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end", "def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end", "def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end", "def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end", "def url_whitelist; end", "def admin_social_network_params\n params.require(:social_network).permit!\n end", "def filter_params\n params.require(:filters).permit(:letters)\n end", "def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end", "def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end", "def sensitive_params=(params)\n @sensitive_params = params\n end", "def permit_request_params\n params.permit(:address)\n end", "def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end", "def secure_params\n params.require(:location).permit(:name)\n end", "def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end", "def question_params\n params.require(:survey_question).permit(question_whitelist)\n end", "def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end", "def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end", "def maintenance_request_params\n params[:maintenance_request].permit! 
#allow all parameters for now\n end", "def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end", "def url_params\n params[:url].permit(:full)\n end", "def backend_user_params\n params.permit!\n end", "def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend", "def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end", "def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end", "def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end", "def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end", "def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end", "def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end", "def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end", "def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end" ]
[ "0.69792545", "0.6781151", "0.67419964", "0.674013", "0.6734356", "0.6591046", "0.6502396", "0.6496313", "0.6480641", "0.6477825", "0.64565", "0.6438387", "0.63791263", "0.63740575", "0.6364131", "0.63192815", "0.62991166", "0.62978333", "0.6292148", "0.6290449", "0.6290076", "0.62894756", "0.6283177", "0.6242471", "0.62382483", "0.6217549", "0.6214457", "0.6209053", "0.6193042", "0.6177802", "0.6174604", "0.61714715", "0.6161512", "0.6151757", "0.6150663", "0.61461", "0.61213595", "0.611406", "0.6106206", "0.6105114", "0.6089039", "0.6081015", "0.6071004", "0.60620916", "0.6019971", "0.601788", "0.6011056", "0.6010898", "0.6005122", "0.6005122", "0.6001556", "0.6001049", "0.59943926", "0.5992201", "0.59909594", "0.5990628", "0.5980841", "0.59669393", "0.59589154", "0.5958826", "0.5957911", "0.5957385", "0.5953072", "0.59526145", "0.5943361", "0.59386164", "0.59375334", "0.59375334", "0.5933856", "0.59292704", "0.59254247", "0.5924164", "0.59167904", "0.59088355", "0.5907542", "0.59064597", "0.5906243", "0.5898226", "0.589687", "0.5896091", "0.5894501", "0.5894289", "0.5891739", "0.58860534", "0.5882406", "0.587974", "0.58738774", "0.5869024", "0.58679986", "0.5867561", "0.5865932", "0.5864461", "0.58639693", "0.58617616", "0.5861436", "0.5860451", "0.58602303", "0.5854586", "0.58537364", "0.5850427", "0.5850199" ]
0.0
-1
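A minimal sketch of how a whitelist like product_params is consumed (hypothetical create action; only the permitted attributes reach mass assignment):

def create
  @product = Product.new(product_params)  # unpermitted params are dropped

  if @product.save
    render json: @product, status: :created
  else
    render json: @product.errors, status: :unprocessable_entity
  end
end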
override this method to return false when this object should not be destroyed
def destroyable?
  true
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroyed?\n @destroyed ||= false\n end", "def destroyed?\n @destroyed ||= false\n end", "def destroyed?\n @destroyed ||= false\n end", "def destroyed?\n @destroyed ||= false\n end", "def destroyed?\n @destroyed == true\n end", "def destroyed?\n @destroyed == true\n end", "def destroyed?\n @destroyed == true\n end", "def destroyed?\n @destroyed == true\n end", "def destroyed?\n @destroyed == true\n end", "def destroyed?\n @destroyed\n end", "def destroyed?\n !!@destroyed\n end", "def destroyed?\n @destroyed\n end", "def destroyed?\n @destroyed\n end", "def destroyed?\n @destroyed\n end", "def destroyed?\n !!destroyed\n end", "def can_be_destroyed?\n false\n end", "def destroyed?\n super || !deleted_at.nil?\n end", "def is_destroyable?\n\t\tfalse\n\tend", "def _destroy\n marked_for_destruction?\n end", "def destroy?\n return false\n end", "def destroy\n false\n end", "def destructible?\n false\n end", "def destroyed?(obj)\n @destroyed && @destroyed[obj.object_id]\n end", "def killed?\n false\n end", "def destroyable?\n @destroyable\n end", "def destroy?\n false\n end", "def destroy?\n false\n end", "def destroy?\n attached? && !cached?\n end", "def destroyed?\n @_deleted || (!new_record? && !exist?)\n end", "def remove_before_destroy?\n false\n end", "def destroyable?\n true\n end", "def destroyable?\n true\n end", "def destroyable?\n !new_record? and !immutable? and !modified_records?\n end", "def destroyed?\n return true if !@pid && !@io_out && !@io_in && !@io_err && @main == nil\n return false\n end", "def destroy\n return super if deletable?\n\n puts_destroy_help\n\n false\n end", "def _destroy\n !!@destroy\n end", "def erased?\n false\n end", "def destroyed=(destroyed)\n @destroyed = destroyed && true\n end", "def after_destroy\n super\n @destroyed = true\n end", "def destroy\n return false\n end", "def destroy\n return unless destroyable?\n super\n end", "def destroy\n return false if callback(:before_destroy) == false\n result = destroy_without_callbacks\n callback(:after_destroy)\n result\n end", "def destroy\n return false if callback(:before_destroy) == false\n result = destroy_without_callbacks\n callback(:after_destroy)\n result\n end", "def has_lifecycle?\n false\n end", "def destroyable?\n if self.units.size > 0\n return false\n end\n return true\n end", "def will_be_destroyed?(association_name, attributes)\n allow_destroy?(association_name) && has_destroy_flag?(attributes)\n end", "def before_destroy\n # can_destroyed?\n end", "def before_destroy\n # can_destroyed?\n end", "def before_destroy\n # can_destroyed?\n end", "def alive?\n super && !@cancelled\n #!!@alive# && !@cancelled\n end", "def marked_for_destruction?\n @marked_for_destruction\n end", "def detached?\n @obj.nil?\n end", "def detached?\n @obj.nil?\n end", "def destroy?\n event == 'destroy'\n end", "def disposed?\n end", "def destroy\n false\n end", "def destroy\n false\n end", "def destroy\n false\n end", "def destroy\n false\n end", "def finalized?\n self.state != STATE_NEW\n end", "def previously_persisted?\n !new_record? && destroyed?\n end", "def dirty?\n false\n end", "def disposed?\n return @sprites[\"base\"].disposed? if @sprites[\"base\"]\n return true\n end", "def cancelled?\n false\n end", "def destroy\n if delete_cim_profile and super\n return true\n end\n\n return false\n end", "def destroy?\n name == :destroy\n end", "def destruction_allowed?\n success = protect_if_methods.all? 
do |method, message|\n allowed = protect_method_allows_destruction(method)\n errors.add(:base, message) if !allowed && message\n allowed\n end\n\n throw(:abort) if !success && ActiveRecord::VERSION::MAJOR >= 5\n success\n end", "def _delete\n marked_for_destruction?\n end", "def remove_before_destroy?\n true\n end", "def is_destroyable\n return false if self.publication_id\n return false if self.endnote_files.count > 0\n return true\n end", "def released_true?\n self.released\n end", "def destruction?\n self.diff['attributes']['old'] && !self.diff['attributes']['new']\n end", "def stop\n if self.started\n _dispose!\n self.started = false\n true\n else\n false\n end\n end", "def persisted?\n !new_record? && @destroyed != true\n end", "def destroyable?\n return false unless units.any?\n end", "def disposed? \n @disposed\n end", "def orphan?\n false\n end", "def flag_as_destroyed\n self.destroyed = true\n self.flagged_for_destroy = false\n atomic_path\n end", "def destroyable?\n return false unless units.any?\n end", "def disposed?\n @disposed\n end", "def disposed?\n @_disposed\n end", "def delete_dirty?\n false\n end", "def released? \n self.released \n end", "def new?\n !@object\n end", "def new?\n !@object\n end", "def disposed?\n @sprite.nil? || @sprite.disposed?\n end", "def destroy\n return false if not self.saved?\n return self.class.destroy self\n end", "def destroyable?\n if self.master_files.empty? and self.components.empty? and self.bibls.empty?\n return true\n else\n return false\n end \n end", "def reset()\n super()\n return true\n end", "def disposed?\n @sprite.nil? or @sprite.disposed?\n end", "def destroy\n delete_object\n true\n end", "def dead?()\n #This is a stub, used for indexing\n end", "def dead?\n false\n end", "def closed?\n false\n end", "def destroyable?\n self.quiz_events.empty?\n end", "def disposed?\n fail NotImplementedError\n end", "def disposed?\n fail NotImplementedError\n end", "def disposed?\n fail NotImplementedError\n end", "def persisted?\n !new? && !destroyed?\n end", "def can_destroyed?\n return !self.business_user\n # list of associations to check (automatic)\n #has_assocs = []\n #self.class.reflections.each do |r_name, r|\n # has_assocs << r_name if [:has_one, :has_many, :has_and_belongs_to_many].include? r.macro\n #end\n \n # check for emptyness\n #has_assocs.each do |r_name|\n # assoc = self.send(r_name)\n # nothing = assoc.respond_to?('empty?') ? assoc.empty? : assoc.nil?\n # return false unless nothing\n #end\n \n true\n end" ]
[ "0.78683174", "0.78683174", "0.78683174", "0.78683174", "0.78434885", "0.7822502", "0.7822502", "0.7822502", "0.7822502", "0.7647162", "0.76309353", "0.76284796", "0.76284796", "0.76284796", "0.760819", "0.75922847", "0.7280206", "0.72371733", "0.70902073", "0.705745", "0.7027951", "0.7023413", "0.7017775", "0.70054984", "0.6998796", "0.6871231", "0.6871231", "0.68278044", "0.67634606", "0.67008424", "0.6617581", "0.6617581", "0.6584577", "0.6580676", "0.65805733", "0.6569028", "0.6562014", "0.65604484", "0.65438604", "0.6519255", "0.65116805", "0.6499013", "0.6499013", "0.64773244", "0.64580727", "0.6446413", "0.64456785", "0.64456785", "0.64456785", "0.64364725", "0.643543", "0.6431834", "0.6431834", "0.64245623", "0.64026034", "0.63938814", "0.63938814", "0.63938814", "0.63938814", "0.63344043", "0.63012755", "0.62916446", "0.6284693", "0.62823033", "0.627824", "0.6257972", "0.62461686", "0.62427694", "0.6211588", "0.62032205", "0.61862904", "0.6182923", "0.6178894", "0.6170487", "0.6161694", "0.6148067", "0.61411947", "0.6135484", "0.61343205", "0.61300737", "0.6124884", "0.61184657", "0.6109359", "0.61007553", "0.61007553", "0.60925466", "0.60851604", "0.60768646", "0.6071442", "0.6067338", "0.60596234", "0.6057185", "0.60535634", "0.60498506", "0.60456896", "0.6045578", "0.6045578", "0.6045578", "0.6042716", "0.6031875" ]
0.70107234
23
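A minimal sketch of how an overridable destroyable? hook can guard deletion (assumes Rails 5+, where throwing :abort halts the callback chain):

class Product < ApplicationRecord
  before_destroy :check_destroyable

  # Override in subclasses to return false when the record must be kept
  def destroyable?
    true
  end

  private

  def check_destroyable
    throw(:abort) unless destroyable?
  end
end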
GET /groups/1/products_auths
GET /groups/1/products_auths.json
def index
  render json: ProductAuth.where({group_id: params[:group_id]})
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show\n # is_my_resource(params[:id])\n\n # prossumerProductsIds = Prossumer.find(params[:id]).products.ids\n render json: ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first.as_json(:include => :product)\n end", "def get_authorization_products_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: AuthorizationApi.get_authorization_products ...\"\n end\n \n # resource path\n local_var_path = \"/api/v2/authorization/products\".sub('{format}','json')\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # HTTP header 'Accept' (if needed)\n local_header_accept = ['application/json']\n local_header_accept_result = @api_client.select_header_accept(local_header_accept) and header_params['Accept'] = local_header_accept_result\n\n # HTTP header 'Content-Type'\n local_header_content_type = ['application/json']\n header_params['Content-Type'] = @api_client.select_header_content_type(local_header_content_type)\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n \n auth_names = ['PureCloud OAuth']\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'OrganizationProductEntityListing')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: AuthorizationApi#get_authorization_products\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def get_authorization_products(opts = {})\n data, _status_code, _headers = get_authorization_products_with_http_info(opts)\n return data\n end", "def user_products\n @products = current_user.products\n\n respond_to do |format|\n format.html\n format.json { render json: @products }\n end\n end", "def products\n request :public, :get, :products\n end", "def index\n @products = Product.all.page(params[:page]).per(10)\n authorize Product\n end", "def index\n @product_product_groups = ProductProductGroup.all\n end", "def index\n @product_groups = ProductGroup.all\n end", "def show\n @product_group = ProductGroup.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product_group }\n end\n end", "def product_list\n current_user.products.order('name').collect {|p| [ p.name, p.ticket_project_id, p.id ]}\n end", "def obtains_product\n product = Product.find(params[:product_id])\n @product = product.user_id == @current_user.id ? 
product : nil\n (render(json: { e: 'AUTH' }, status: :unauthorized) && nil) if @product.nil?\n end", "def index\n @products = @user.products\n # was @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @products = current_user.products.all\n end", "def find_products\n\n product_ids = session[:compare_products] || []\n if product_ids.length > 4\n flash[:notice] = I18n.t('compare_products.limit_is_4')\n product_ids = product_ids[0..3]\n elsif product_ids.length < 1\n flash[:error] = I18n.t('compare_products.insufficient_data')\n redirect_to \"/t/#{@taxon.permalink}\"\n end\n @products = Spree::Product.find(:all, :conditions => { :id => product_ids},\n :include => { :product_properties => :property },\n :limit => 4)\n end", "def index\n @products_colors = ProductsColor.all\n authorize ProductsColor\n end", "def index\n @group_products = GroupProduct.all\n end", "def index\n authorize Group\n render :json => @group.group_memberships\n end", "def get_products()\n\tputs \"Getting products\"\n\tresponse = request_get(\"/api/product\")\n\tputs response.body\nend", "def all\n @products = Product.get_list_active_products.page(params[:page]).per(10)\n if @products.present?\n @products\n else\n @object = 'product'\n render \"api/v1/errors/404\", status: 401\n end\n end", "def products\n run(:get,\"/school_products\", [200])\n end", "def index\n @products = current_user.products\n end", "def index\n auth_response = plaid_client.auth.get(access_token)\n render json: auth_response.to_json\n end", "def index\n if current_user.admin?\n @products = Product.all\n else\n if current_user.private?\n @products = Product.where(owner_id: current_user.private_id)\n elsif current_user.business?\n @products = Product.where(owner_id: current_user.business_id)\n end\n\n end\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @groups = current_user.groups\n render json: @groups\n end", "def index\n if is_my_resource(params[:prossumer_id])\n @products = Product.where(prossumer_id: params[:prossumer_id]).as_json({\n cycle_id: params[:cycle_id],\n include: {\n prossumer: {\n except: [:encrypted_password, :salt, :confirm_hash]\n },\n product_category: {}\n }\n })\n render json: @products\n end\n end", "def index\n @groupsIds = GroupsProssumer.where(prossumer_id: params[:prossumer_id]).pluck(:group_id)\n @groups = Group.where(id: @groups)\n render json: @groups\n end", "def show\n @group = Group.find_by_guid(params[:id])\n respond_to do |format|\n format.json {\n if @group.password == params[:password]\n render json: @group\n else\n render json: \"Forbidden\", status: :forbidden\n end\n }\n format.html # show.html.erb\n end\n end", "def index_users\n authorize Group, :index?\n render :json => @group.users\n end", "def index\n @api_v1_products = Product.all\n json_response(@api_v1_products)\n end", "def list_fg_products\n\treturn if authorise_for_web('fg_product','read') == false \n\n \tif params[:page]!= nil \n\n \t\tsession[:fg_products_page] = params['page']\n\n\t\t render_list_fg_products\n\n\t\t return \n\telse\n\t\tsession[:fg_products_page] = nil\n\tend\n\n\tlist_query = \"@fg_product_pages = Paginator.new self, FgProduct.count, @@page_size,@current_page\n\t @fg_products = FgProduct.find(:all,\n\t\t\t\t :limit => @fg_product_pages.items_per_page,\n\t\t\t\t :order => 'fg_product_code',\n\t\t\t\t :offset => 
@fg_product_pages.current.offset)\"\n\tsession[:query] = list_query\n\trender_list_fg_products\nend", "def groups\n \n \n @groups = @current_user.groups\n render 'groups.json.jbuilder', status: :ok\n end", "def index\n Shop.set_store_session\n @products = ShopifyAPI::Product.all\n end", "def auth_projects\n projects =\n services.identity.auth_projects(@scoped_domain_id).sort_by(&:name)\n render json: { auth_projects: projects }\n end", "def products_path\n \tmain_app.send(Auth::OmniAuth::Path.create_or_index_path(Auth.configuration.product_class))\n end", "def get_products_by_ids\n products_ids = params[:products_ids]\n products = Array.new\n JSON.parse(products_ids).each do |product_id|\n product = Product.find_by_id(product_id)\n if product\n products.push(product)\n end\n end\n render json: {products: serialized_products(products)}, status: :ok\n end", "def all\n\n res=[]\n buf={}\n @groups = Group.where('kls_parent=1613').order(\"NAME ASC\").each do |grp|\n\t if grp.products.exists?\n buf={}\n\t\tbuf[:name] = grp.name\n\t\tbuf[:id] = grp.id\n\t\tbuf[:kls_parent] = grp.kls_parent\n\t\tbuf[:kls_unicode] = grp.kls_unicode\n\t\tbuf[:kls_childcount] = grp.kls_childcount\n \t res.push(buf)\n\t end\n end\n # @rgroups = @group\n# @groups = Group.where('kls_parent=1613')\n# render json: @groups\n render json: res\n\n end", "def index\n @product_images = ProductImage.where(product_uuid: params[:product_id])\n render json: @product_images, status: 200\n end", "def product_groups\n @product_groups ||= ProductGroupProxy.new(self)\n end", "def index\n @groups_with_el = Provider.pluck(:group_id).uniq - [nil, 0]\n \n @groups = Provider.where(is_group: true).order(:name).map{|g| {\n id: g.id, \n name: g.name,\n spec: g.spec,\n allow_delete: !@groups_with_el.include?(g.id),\n goods_types_array: g.goods_type_names_array.join(', ')\n }}\n\n store_providers_path\n end", "def index\n @food_products = FoodProduct.where(group_id: @groups)\n end", "def _products_manage_list\r\n\t\t\tproject = Project.find params[:search][:id]\r\n\r\n\t\t\t# Author\r\n\t\t\tauthorize! :edit, project\r\n\r\n\t\t\tproducts = Project.product_search_with_params params[:search]\r\n\r\n\t\t\treturn render json: { status: 1 } if products[:real_estates].blank? 
&& products[:floor_real_estates].blank?\r\n\r\n\t\t\t# Paging\r\n\t\t\tper = 30\r\n\t\t\tcount = products[:real_estates].count + products[:floor_real_estates].count\r\n\t\t\tparams[:page] ||= 1\r\n\t\t\tpage = params[:page].to_i\r\n\t\t\tif products[:real_estates].present?\r\n\t\t\t\toffset = (page - 1) * per\r\n\r\n\t\t\t\tif offset >= products[:real_estates].count\r\n\t\t\t\t\toffset -= products[:real_estate].count\r\n\t\t\t\t\tproducts[:real_estates] = []\r\n\t\t\t\t\tproducts[:floor_real_estates] = products[:floor_real_estates].offset(offset).limit(per)\r\n\t\t\t\telse\r\n\t\t\t\t\tproducts[:real_estates] = products[:real_estates].offset(offset).limit(per)\r\n\t\t\t\t\tif products[:real_estate].count == per\r\n\t\t\t\t\t\tproducts[:floor_real_estates] = []\r\n\t\t\t\t\telse\r\n\t\t\t\t\t\tlimit = per - products[:real_estates].count\r\n\t\t\t\t\t\tproducts[:floor_real_estates].limit(limit)\r\n\t\t\t\t\tend\r\n\t\t\t\tend\r\n\t\t\telse\r\n\t\t\t\tproducts[:floor_real_estates] = products[:floor_real_estates].page page, per\r\n\t\t\tend\r\n\r\n\t\t\trender json: {\r\n\t\t\t\tstatus: 0,\r\n\t\t\t\tresult: {\r\n\t\t\t\t\tlist: render_to_string(partial: 'products_list', locals: { products: products }),\r\n\t\t\t\t\tpagination: render_to_string(partial: 'shared/pagination', locals: { page: page, per: per, total: count })\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\tend", "def index\n @products_grupos = ProductsGrupo.all\n end", "def getKind\n @products = Product.where(\"kind = ?\", params[:kind]).available.PriceOrder.paginate(page: params[:page], per_page: 5)\n render json: @products\n end", "def show\n @products = @portfolio.products.includes(:groups).order(\"products.name\", \"groups.name\").uniq.paginate(:page => params[:products_page], \n :per_page => session[:results_per_page])\n @total_allocation = @portfolio.get_total_allocation(@year, @allocation_precision)\n @groups = Group.includes(:product_group_portfolios => :product).where(:product_group_portfolios => {:portfolio_id => @portfolio.id}).order(\"groups.name\", \"products.name\").uniq\n end", "def get_all_products\n\n\t@parsed_products_array = []\n\n\tshopify_products = HTTParty.get(\"https://roquetest.myshopify.com/admin/products.json\", :headers => ShopifyHeaders).parsed_response\n\n\tshopify_products['products'].each do |product|\n\t\ttitle \t\t= product['title']\n\t\tprice \t\t= product['variants'][0]['price']\n\t\timg \t\t= product['images'][0]['src']\n\t\tproduct_id \t= product['variants'][0]['product_id']\n\n\t\tshop \t\t= \"Shopify\"\n\n\t\t@parsed_products_array << ParsedProduct.new(product_id, title, price, img, shop)\n\tend\n\n\treturn @parsed_products_array\nend", "def get_products(add_params = nil)\n params = {\n uid: uid,\n }\n api_call('/stores/:uid/products(.:format)',:get,params,add_params)\n end", "def index\n @groups = current_user.groups\n @groups = @groups.where('name LIKE ?', \"%#{params[:q]}%\") if params[:q].present?\n @groups = current_user.groups_sorted_by_admin_state_and_name(@groups)\n @groups_pictures = Group.group_images_hash_for_groups @groups\n\n respond_to do |format|\n format.html { render :index }\n format.json { render json: @groups }\n end\n end", "def products(params = {})\n @products ||= product_ids.map do |product_id|\n client.products.find(product_id)\n end\n end", "def get_accessible_products\n return call('Product.get_accessible_products')\n end", "def index\n authorize UserGroup\n @user_groups = UserGroup.all\n end", "def index\n @host_groups = HostGroup.with_permissions_to(:show)\n\n respond_to do 
|format|\n format.html # index.html.erb\n format.json { render json: @host_groups }\n end\n end", "def get_products\n products = response['products']\n products_ops = []\n products.each_value do |value|\n product = {}\n product[:sku] = value['sku']\n product[:product_family] = value['productFamily']\n product[:pricing_list_id] = pricing_list.id\n attributes = value['attributes']\n product[:service_code] = attributes['servicecode']\n product[:location] = attributes['location']\n product[:location_type] = attributes['locationType']\n product[:usage_type] = attributes['usagetype']\n product[:operation] = attributes['operation']\n product[:request_description] = attributes['requestDescription']\n product[:request_type] = attributes['requestType']\n product[:service_name] = attributes['servicename']\n product[:pricing_list_id] = pricing_list.id\n product[:created_at] = Time.zone.now.utc\n product[:updated_at] = Time.zone.now.utc\n products_ops << product\n end\n products_ops\n end", "def update\n @product_auth = ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first\n\n if @product_auth.update(params.permit(:state))\n render json: @product_auth\n else\n render json: @product_auth.errors\n end\n end", "def admin_list\n @products = Product.find(:all)\n end", "def create\n\n params[:state] = 1;\n @product_auth = ProductAuth.new(params.permit(:state, :product_id, :group_id, :ecos, :euros));\n if @product_auth.save\n render json: @product_auth\n else\n render json: @product_auth.errors\n end\n end", "def index\n @cart_products = if current_user.cart.present?\n current_user.cart.carts_products.includes(:product).all\n else\n []\n end\n render formats: :json\n end", "def products_aggregate\n quantities = GroupBoxContent.sum(:quantity,group: :product_id)\n quantities.delete_if {|key, value| value == 0 } \n\n products = Product.find(quantities.keys)\n @products_quantities = {}\n products.each do |product|\n @products_quantities[product] = quantities[product.id]\n end\n\n respond_to do |format|\n format.html # products_aggregate.html.erb\n format.json { render json: @groups }\n end\n end", "def new\n @product_group = ProductGroup.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product_group }\n end\n end", "def index\n @resources = Group.search(params[:search]).where(id: current_user.admin_groups).order(\"#{sort_column} #{sort_direction}\").paginate(per_page: 11, page: params[:page])\n authorize @resources\n end", "def index\n @user = User.find_by(token: params[:token])\n if params[:user_id] && params[:token]\n render json: Product.where(:user_id => @user.id , :active => true).all, status: :ok\n else\n render json: Product.all, status: :ok\n end\n end", "def all_auth(group)\n return nil unless AUTH_STRINGS.has_key? 
group\n @users.select {|_, auth| auth == group}.keys\n end", "def show\n @logical_product = LogicalProduct.find(params[:id])\n @variable_groups = @logical_product.variable_groups\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @logical_product }\n end\n end", "def show_group\n group_name = params[:name]\n @group = $iam.groups[group_name]\n @users = @group.users\n @policies = @group.policies\n end", "def show\n session.delete(:size)\n @products_color = ProductsColor.find(params[:id])\n @product = @products_color.product\n # TODO testing\n respond_to do |format|\n if user_signed_in?\n format.html {authorize @products_color}\n else\n format.html {authenticate_user!}\n end\n # TODO fix it\n if @products_color.images.present?\n format.js {}\n else\n format.js { render 'products_colors/no_images' }\n end\n\n end\n end", "def get_groups\n @course = Course.find(params[:course_id])\n render json: @course.groups\n end", "def user_product_listings\n render json: @user.product_listings\n end", "def index\n\n#client = Google::APIClient.new\n\n@products = Product.paginate(:page => params[:page], :per_page => 30)\n@manufacturers = Product.uniq.pluck(:manufacturer)\n@categories = Product.uniq.pluck(:type_b)\n@sub_categories = Product.uniq.pluck(:sub_type)\n\nend", "def index\n @products = current_company.products.order('created_at desc').page(params[:page]).per(20)\n render json: @products, meta: {total_pages: @products.total_pages, total_count: @products.total_count}\n end", "def index\n @product_managements = ProductManagement.all\n\n render json: @product_managements\n end", "def products\n Product.find_all_by_vendor_id(@id)\n end", "def products_overview\n\n @products_quantities = {}\n @boxes = Box.all\n\n @boxes.each do |box|\n quantities = GroupBoxContent.sum(:quantity,group: :product_id, conditions:{\"group_boxes.box_id\" => box.id},joins: :box)\n quantities.delete_if {|key, value| value == 0 }\n\n products = Product.all\n products.each do |product|\n @products_quantities[product] ||= {}\n @products_quantities[product][box] ||= quantities[product.id]\n end\n end\n\n respond_to do |format|\n format.html # products_overview.html.erb\n format.json { render json: @groups }\n end\n end", "def index\n @products = Product.all\n render json: @products\n end", "def list_groups\n if @user.permission_level.value == PermissionLevel.order(\"value DESC\").first.value\n render :json => Group.find(:all).map{|g| g}\n else\n render :json => @user.groups.map{|g| g}\n end\n end", "def show\n #logger.info \"GroupsController Get Parameters: #{params}\"\n if @group\n render json: @group.to_json(:include => {:memberships => {:only => [:admin], :include => {:user => {:only => [:id, :first_name, :last_name, :email]}}}})\n else\n render json: {error: \"YOU MUST BE MEMBER OF THIS GROUP TO SEE IT\"}, status: :unprocessable_entity\n end\n end", "def index\n render json: current_user.membered_groups\n end", "def product_images\n user_id, product = params[:user_id], params[:id]\n return bad_request if !user_id || !product\n # returns all images for a given user and product\n images = UserProduct.find_images(user_id, product)\n # create json array\n img = images ? 
images.collect { |i| i.js_serialize } : []\n render :json => img\n end", "def show \n user = current_user.user_info\n product = user.products.find(params[:id]) if !user.admin\n product = Product.find(params[:id]) if user.admin \n # respond_to do |format|\n # format.html # show.html.erb\n # format.json { render json: @product }\n # end\n p = ShopifyAPI::Product.find product.shopify_id\n redirect_to 'http://shopulse.myshopify.com/products/'+p.handle\n end", "def index\n\n respond_to do |format|\n format.html {\n @search = ProductTreeGroup.search(params[:search])\n @product_tree_groups = @search.paginate(:page => params[:page], :per_page => GlobalSettings.per_page).order('id DESC')\n }\n format.json { \n render json: ProductTreeGroup.select(\"id, name as text\").to_json\n }\n end\n end", "def get_products_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug 'Calling API: PricesApi.get_products ...'\n end\n # resource path\n local_var_path = '/v1/products'\n\n # query parameters\n query_params = {}\n query_params[:'provider_id'] = opts[:'provider_id'] if !opts[:'provider_id'].nil?\n query_params[:'service_id'] = opts[:'service_id'] if !opts[:'service_id'].nil?\n query_params[:'name'] = opts[:'name'] if !opts[:'name'].nil?\n\n # header parameters\n header_params = {}\n # HTTP header 'Accept' (if needed)\n header_params['Accept'] = @api_client.select_header_accept(['application/json'])\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n auth_names = ['oauth2']\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'Products')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: PricesApi#get_products\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def get_products( session_key)\n response_xml = self.call( :get_products, message: {\n arg0: session_key\n })\n response = IssueCentre::Response.parse( response_xml)\n end", "def show\n render json: @auth_ip_address_group, status: :ok\n end", "def index\n @products = Product.all\n logger.info current_user.name\n end", "def index\n b_admin = current_user.admin? 
rescue false\n @products = Product.filter_by_params(b_admin, params)\n #@products = Product.available\n \n @title = Product.page_description(params)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @admin_products = Admin::Product.all\n end", "def my_groups\n groups = Group.joins(:group_users).where(:group_users => {user_id: params[:user_id]})\n\n paginate json: groups\n end", "def index\n @user_interactive_products = UserInteractiveProduct.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @user_interactive_products }\n end\n end", "def find_products(payload = {})\n request('FindProducts', payload)\n end", "def authorize_product!(product)\n unless product.nil?\n unless current_user.products.include?(product)\n raise Exceptions::ProductAccessDenied\n end\n end\n end", "def index\n @roles = record.roles.includes(:resource)\n render jsonapi: @roles, include: %i[users groups resource]\n end", "def unit_show\n @product = Product.find(params[:id])\n\n @store_cust_group = CustomerGroup.find_by_name(\"Bakul/Toko\")\n @workshop_cust_group = CustomerGroup.find_by_name(\"Bengkel/Montir\")\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def index\n \tproducts = Product.all\n \trender json: products\n \tend", "def getProduct( product_id)\n params = Hash.new\n params['product_id'] = product_id\n return doCurl(\"get\",\"/product\",params)\n end", "def index\n @occasions = current_user.occasions.sorteddesc.all.paginate(page: params[:page], :per_page => 10)\n @products = Product.all\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @occasions }\n end\n end", "def getTokenProduct( entity_id, product_id, language, portal_name, flatpack_id, source, channel, campaign)\n params = Hash.new\n params['entity_id'] = entity_id\n params['product_id'] = product_id\n params['language'] = language\n params['portal_name'] = portal_name\n params['flatpack_id'] = flatpack_id\n params['source'] = source\n params['channel'] = channel\n params['campaign'] = campaign\n return doCurl(\"get\",\"/token/product\",params)\n end", "def show\n @product = Product.find(params[:id]) \n @admin_images = Image.admins_photos.where(product_id: @product.id).order(:title) || []\n @users_images = Image.users_photos.where(product_id: @product.id).order(:title) || []\n respond_to do |format|\n format.html\n format.js \n format.json { render json: @product }\n end\n end", "def index\n authorize! :see, Group\n @groups = Group.all\n end", "def index\n page = params[:page].to_i\n @admin_products = Product.page(page).per(10)\n end", "def index\n @products_purposes_relations = ProductsPurposesRelation.all\n end", "def index\n @products = Product.joins(:artist).order(session[:order_by])\n .paginate(:page => params[:page])\n\n @sort_order_selected = session[:order_by]\n #@products = Product.resent(5)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def product(name)\n get(\"/apiproducts/#{name}\")\n end" ]
[ "0.71310353", "0.6766916", "0.6560989", "0.60197455", "0.5978788", "0.59639066", "0.59567684", "0.5933693", "0.59054315", "0.5844431", "0.58251", "0.57575804", "0.57272196", "0.57269174", "0.57219607", "0.57190156", "0.57056576", "0.56819797", "0.5678574", "0.5677779", "0.5669512", "0.56566894", "0.56358135", "0.55826914", "0.5578117", "0.5571685", "0.5566151", "0.55426574", "0.55399096", "0.55275226", "0.55205697", "0.551731", "0.5510278", "0.5505016", "0.5486317", "0.54185265", "0.54064715", "0.5401707", "0.5387311", "0.5379762", "0.5378269", "0.5376202", "0.53723437", "0.5358262", "0.53512156", "0.53493464", "0.5319545", "0.53171206", "0.5294135", "0.52890366", "0.5288331", "0.5286456", "0.5280224", "0.52789044", "0.5264723", "0.5258037", "0.5257858", "0.52473694", "0.52468854", "0.523925", "0.52327925", "0.5232597", "0.5228524", "0.522809", "0.52148706", "0.5211375", "0.52110195", "0.5195811", "0.5189446", "0.51848936", "0.51702654", "0.5157701", "0.5154102", "0.5151661", "0.5150169", "0.5149659", "0.51459765", "0.5144997", "0.51448315", "0.5141792", "0.5134435", "0.5129559", "0.51224446", "0.51122856", "0.5105777", "0.5103039", "0.5092002", "0.5091911", "0.50858957", "0.50846756", "0.50812495", "0.5079759", "0.5077958", "0.50748545", "0.50727373", "0.5068908", "0.50685126", "0.5051259", "0.504998", "0.5043869" ]
0.76373637
0
GET /groups/1/products_auths/1
GET /groups/1/products_auths/1.json
def show
  # is_my_resource(params[:id])

  # prossumerProductsIds = Prossumer.find(params[:id]).products.ids
  render json: ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first.as_json(:include => :product)
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index\n render json: ProductAuth.where({group_id: params[:group_id]})\n end", "def get_authorization_products_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: AuthorizationApi.get_authorization_products ...\"\n end\n \n # resource path\n local_var_path = \"/api/v2/authorization/products\".sub('{format}','json')\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # HTTP header 'Accept' (if needed)\n local_header_accept = ['application/json']\n local_header_accept_result = @api_client.select_header_accept(local_header_accept) and header_params['Accept'] = local_header_accept_result\n\n # HTTP header 'Content-Type'\n local_header_content_type = ['application/json']\n header_params['Content-Type'] = @api_client.select_header_content_type(local_header_content_type)\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n \n auth_names = ['PureCloud OAuth']\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'OrganizationProductEntityListing')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: AuthorizationApi#get_authorization_products\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def get_authorization_products(opts = {})\n data, _status_code, _headers = get_authorization_products_with_http_info(opts)\n return data\n end", "def user_products\n @products = current_user.products\n\n respond_to do |format|\n format.html\n format.json { render json: @products }\n end\n end", "def obtains_product\n product = Product.find(params[:product_id])\n @product = product.user_id == @current_user.id ? 
product : nil\n (render(json: { e: 'AUTH' }, status: :unauthorized) && nil) if @product.nil?\n end", "def show\n @product_group = ProductGroup.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product_group }\n end\n end", "def products\n request :public, :get, :products\n end", "def index\n @products = Product.all.page(params[:page]).per(10)\n authorize Product\n end", "def index\n @products = @user.products\n # was @products = Product.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @product_product_groups = ProductProductGroup.all\n end", "def index\n if is_my_resource(params[:prossumer_id])\n @products = Product.where(prossumer_id: params[:prossumer_id]).as_json({\n cycle_id: params[:cycle_id],\n include: {\n prossumer: {\n except: [:encrypted_password, :salt, :confirm_hash]\n },\n product_category: {}\n }\n })\n render json: @products\n end\n end", "def product_list\n current_user.products.order('name').collect {|p| [ p.name, p.ticket_project_id, p.id ]}\n end", "def index\n @product_groups = ProductGroup.all\n end", "def get_products()\n\tputs \"Getting products\"\n\tresponse = request_get(\"/api/product\")\n\tputs response.body\nend", "def find_products\n\n product_ids = session[:compare_products] || []\n if product_ids.length > 4\n flash[:notice] = I18n.t('compare_products.limit_is_4')\n product_ids = product_ids[0..3]\n elsif product_ids.length < 1\n flash[:error] = I18n.t('compare_products.insufficient_data')\n redirect_to \"/t/#{@taxon.permalink}\"\n end\n @products = Spree::Product.find(:all, :conditions => { :id => product_ids},\n :include => { :product_properties => :property },\n :limit => 4)\n end", "def index\n @products = current_user.products\n end", "def index\n @products = current_user.products.all\n end", "def index\n if current_user.admin?\n @products = Product.all\n else\n if current_user.private?\n @products = Product.where(owner_id: current_user.private_id)\n elsif current_user.business?\n @products = Product.where(owner_id: current_user.business_id)\n end\n\n end\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @api_v1_products = Product.all\n json_response(@api_v1_products)\n end", "def index\n @products_colors = ProductsColor.all\n authorize ProductsColor\n end", "def index\n @group_products = GroupProduct.all\n end", "def show\n @group = Group.find_by_guid(params[:id])\n respond_to do |format|\n format.json {\n if @group.password == params[:password]\n render json: @group\n else\n render json: \"Forbidden\", status: :forbidden\n end\n }\n format.html # show.html.erb\n end\n end", "def all\n @products = Product.get_list_active_products.page(params[:page]).per(10)\n if @products.present?\n @products\n else\n @object = 'product'\n render \"api/v1/errors/404\", status: 401\n end\n end", "def products\n run(:get,\"/school_products\", [200])\n end", "def products_path\n \tmain_app.send(Auth::OmniAuth::Path.create_or_index_path(Auth.configuration.product_class))\n end", "def index\n auth_response = plaid_client.auth.get(access_token)\n render json: auth_response.to_json\n end", "def index\n @product_images = ProductImage.where(product_uuid: params[:product_id])\n render json: @product_images, status: 200\n end", "def create\n\n params[:state] = 1;\n @product_auth = ProductAuth.new(params.permit(:state, :product_id, :group_id, :ecos, :euros));\n if 
@product_auth.save\n render json: @product_auth\n else\n render json: @product_auth.errors\n end\n end", "def index\n authorize Group\n render :json => @group.group_memberships\n end", "def new\n @product_group = ProductGroup.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product_group }\n end\n end", "def getKind\n @products = Product.where(\"kind = ?\", params[:kind]).available.PriceOrder.paginate(page: params[:page], per_page: 5)\n render json: @products\n end", "def update\n @product_auth = ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first\n\n if @product_auth.update(params.permit(:state))\n render json: @product_auth\n else\n render json: @product_auth.errors\n end\n end", "def index\n @groupsIds = GroupsProssumer.where(prossumer_id: params[:prossumer_id]).pluck(:group_id)\n @groups = Group.where(id: @groups)\n render json: @groups\n end", "def index\n Shop.set_store_session\n @products = ShopifyAPI::Product.all\n end", "def index\n @groups = current_user.groups\n render json: @groups\n end", "def get_products_by_ids\n products_ids = params[:products_ids]\n products = Array.new\n JSON.parse(products_ids).each do |product_id|\n product = Product.find_by_id(product_id)\n if product\n products.push(product)\n end\n end\n render json: {products: serialized_products(products)}, status: :ok\n end", "def get_products\n products = response['products']\n products_ops = []\n products.each_value do |value|\n product = {}\n product[:sku] = value['sku']\n product[:product_family] = value['productFamily']\n product[:pricing_list_id] = pricing_list.id\n attributes = value['attributes']\n product[:service_code] = attributes['servicecode']\n product[:location] = attributes['location']\n product[:location_type] = attributes['locationType']\n product[:usage_type] = attributes['usagetype']\n product[:operation] = attributes['operation']\n product[:request_description] = attributes['requestDescription']\n product[:request_type] = attributes['requestType']\n product[:service_name] = attributes['servicename']\n product[:pricing_list_id] = pricing_list.id\n product[:created_at] = Time.zone.now.utc\n product[:updated_at] = Time.zone.now.utc\n products_ops << product\n end\n products_ops\n end", "def list_fg_products\n\treturn if authorise_for_web('fg_product','read') == false \n\n \tif params[:page]!= nil \n\n \t\tsession[:fg_products_page] = params['page']\n\n\t\t render_list_fg_products\n\n\t\t return \n\telse\n\t\tsession[:fg_products_page] = nil\n\tend\n\n\tlist_query = \"@fg_product_pages = Paginator.new self, FgProduct.count, @@page_size,@current_page\n\t @fg_products = FgProduct.find(:all,\n\t\t\t\t :limit => @fg_product_pages.items_per_page,\n\t\t\t\t :order => 'fg_product_code',\n\t\t\t\t :offset => @fg_product_pages.current.offset)\"\n\tsession[:query] = list_query\n\trender_list_fg_products\nend", "def show \n user = current_user.user_info\n product = user.products.find(params[:id]) if !user.admin\n product = Product.find(params[:id]) if user.admin \n # respond_to do |format|\n # format.html # show.html.erb\n # format.json { render json: @product }\n # end\n p = ShopifyAPI::Product.find product.shopify_id\n redirect_to 'http://shopulse.myshopify.com/products/'+p.handle\n end", "def getProduct( product_id)\n params = Hash.new\n params['product_id'] = product_id\n return doCurl(\"get\",\"/product\",params)\n end", "def show\n @logical_product = LogicalProduct.find(params[:id])\n @variable_groups = 
@logical_product.variable_groups\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @logical_product }\n end\n end", "def index_users\n authorize Group, :index?\n render :json => @group.users\n end", "def index\n @user = User.find_by(token: params[:token])\n if params[:user_id] && params[:token]\n render json: Product.where(:user_id => @user.id , :active => true).all, status: :ok\n else\n render json: Product.all, status: :ok\n end\n end", "def show\n @products = @portfolio.products.includes(:groups).order(\"products.name\", \"groups.name\").uniq.paginate(:page => params[:products_page], \n :per_page => session[:results_per_page])\n @total_allocation = @portfolio.get_total_allocation(@year, @allocation_precision)\n @groups = Group.includes(:product_group_portfolios => :product).where(:product_group_portfolios => {:portfolio_id => @portfolio.id}).order(\"groups.name\", \"products.name\").uniq\n end", "def user_product_listings\n render json: @user.product_listings\n end", "def show\n @product = @user.products.find(params[:id])\n # was @product = Product.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def _products_manage_list\r\n\t\t\tproject = Project.find params[:search][:id]\r\n\r\n\t\t\t# Author\r\n\t\t\tauthorize! :edit, project\r\n\r\n\t\t\tproducts = Project.product_search_with_params params[:search]\r\n\r\n\t\t\treturn render json: { status: 1 } if products[:real_estates].blank? && products[:floor_real_estates].blank?\r\n\r\n\t\t\t# Paging\r\n\t\t\tper = 30\r\n\t\t\tcount = products[:real_estates].count + products[:floor_real_estates].count\r\n\t\t\tparams[:page] ||= 1\r\n\t\t\tpage = params[:page].to_i\r\n\t\t\tif products[:real_estates].present?\r\n\t\t\t\toffset = (page - 1) * per\r\n\r\n\t\t\t\tif offset >= products[:real_estates].count\r\n\t\t\t\t\toffset -= products[:real_estate].count\r\n\t\t\t\t\tproducts[:real_estates] = []\r\n\t\t\t\t\tproducts[:floor_real_estates] = products[:floor_real_estates].offset(offset).limit(per)\r\n\t\t\t\telse\r\n\t\t\t\t\tproducts[:real_estates] = products[:real_estates].offset(offset).limit(per)\r\n\t\t\t\t\tif products[:real_estate].count == per\r\n\t\t\t\t\t\tproducts[:floor_real_estates] = []\r\n\t\t\t\t\telse\r\n\t\t\t\t\t\tlimit = per - products[:real_estates].count\r\n\t\t\t\t\t\tproducts[:floor_real_estates].limit(limit)\r\n\t\t\t\t\tend\r\n\t\t\t\tend\r\n\t\t\telse\r\n\t\t\t\tproducts[:floor_real_estates] = products[:floor_real_estates].page page, per\r\n\t\t\tend\r\n\r\n\t\t\trender json: {\r\n\t\t\t\tstatus: 0,\r\n\t\t\t\tresult: {\r\n\t\t\t\t\tlist: render_to_string(partial: 'products_list', locals: { products: products }),\r\n\t\t\t\t\tpagination: render_to_string(partial: 'shared/pagination', locals: { page: page, per: per, total: count })\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\tend", "def get_products(add_params = nil)\n params = {\n uid: uid,\n }\n api_call('/stores/:uid/products(.:format)',:get,params,add_params)\n end", "def show\n session.delete(:size)\n @products_color = ProductsColor.find(params[:id])\n @product = @products_color.product\n # TODO testing\n respond_to do |format|\n if user_signed_in?\n format.html {authorize @products_color}\n else\n format.html {authenticate_user!}\n end\n # TODO fix it\n if @products_color.images.present?\n format.js {}\n else\n format.js { render 'products_colors/no_images' }\n end\n\n end\n end", "def product_groups\n @product_groups ||= ProductGroupProxy.new(self)\n end", "def 
all\n\n res=[]\n buf={}\n @groups = Group.where('kls_parent=1613').order(\"NAME ASC\").each do |grp|\n\t if grp.products.exists?\n buf={}\n\t\tbuf[:name] = grp.name\n\t\tbuf[:id] = grp.id\n\t\tbuf[:kls_parent] = grp.kls_parent\n\t\tbuf[:kls_unicode] = grp.kls_unicode\n\t\tbuf[:kls_childcount] = grp.kls_childcount\n \t res.push(buf)\n\t end\n end\n # @rgroups = @group\n# @groups = Group.where('kls_parent=1613')\n# render json: @groups\n render json: res\n\n end", "def products_aggregate\n quantities = GroupBoxContent.sum(:quantity,group: :product_id)\n quantities.delete_if {|key, value| value == 0 } \n\n products = Product.find(quantities.keys)\n @products_quantities = {}\n products.each do |product|\n @products_quantities[product] = quantities[product.id]\n end\n\n respond_to do |format|\n format.html # products_aggregate.html.erb\n format.json { render json: @groups }\n end\n end", "def get_all_products\n\n\t@parsed_products_array = []\n\n\tshopify_products = HTTParty.get(\"https://roquetest.myshopify.com/admin/products.json\", :headers => ShopifyHeaders).parsed_response\n\n\tshopify_products['products'].each do |product|\n\t\ttitle \t\t= product['title']\n\t\tprice \t\t= product['variants'][0]['price']\n\t\timg \t\t= product['images'][0]['src']\n\t\tproduct_id \t= product['variants'][0]['product_id']\n\n\t\tshop \t\t= \"Shopify\"\n\n\t\t@parsed_products_array << ParsedProduct.new(product_id, title, price, img, shop)\n\tend\n\n\treturn @parsed_products_array\nend", "def groups\n \n \n @groups = @current_user.groups\n render 'groups.json.jbuilder', status: :ok\n end", "def unit_show\n @product = Product.find(params[:id])\n\n @store_cust_group = CustomerGroup.find_by_name(\"Bakul/Toko\")\n @workshop_cust_group = CustomerGroup.find_by_name(\"Bengkel/Montir\")\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product }\n end\n end", "def index\n @product_managements = ProductManagement.all\n\n render json: @product_managements\n end", "def product\n request = Request.where(user_id: [params[:user_id]])\n render :json => request.as_json\n end", "def product(name)\n get(\"/apiproducts/#{name}\")\n end", "def index\n @products_grupos = ProductsGrupo.all\n end", "def show\n json_response(@api_v1_product)\n end", "def get_products_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug 'Calling API: PricesApi.get_products ...'\n end\n # resource path\n local_var_path = '/v1/products'\n\n # query parameters\n query_params = {}\n query_params[:'provider_id'] = opts[:'provider_id'] if !opts[:'provider_id'].nil?\n query_params[:'service_id'] = opts[:'service_id'] if !opts[:'service_id'].nil?\n query_params[:'name'] = opts[:'name'] if !opts[:'name'].nil?\n\n # header parameters\n header_params = {}\n # HTTP header 'Accept' (if needed)\n header_params['Accept'] = @api_client.select_header_accept(['application/json'])\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n auth_names = ['oauth2']\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'Products')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: PricesApi#get_products\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def getTokenProduct( 
entity_id, product_id, language, portal_name, flatpack_id, source, channel, campaign)\n params = Hash.new\n params['entity_id'] = entity_id\n params['product_id'] = product_id\n params['language'] = language\n params['portal_name'] = portal_name\n params['flatpack_id'] = flatpack_id\n params['source'] = source\n params['channel'] = channel\n params['campaign'] = campaign\n return doCurl(\"get\",\"/token/product\",params)\n end", "def show\n @products = Product.find_by_id(params[:id])\n msg = { status: 200 , product: @products }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end", "def index\n @cart_products = if current_user.cart.present?\n current_user.cart.carts_products.includes(:product).all\n else\n []\n end\n render formats: :json\n end", "def index\n @products = current_company.products.order('created_at desc').page(params[:page]).per(20)\n render json: @products, meta: {total_pages: @products.total_pages, total_count: @products.total_count}\n end", "def show\n render json: @auth_ip_address_group, status: :ok\n end", "def products(params = {})\n @products ||= product_ids.map do |product_id|\n client.products.find(product_id)\n end\n end", "def show\n authenticate_rem\n follower = @current_user || User.find_by_id(params[:user_id])\n render json: Product.find(params[:id]).simple_info(follower), status: :ok\n end", "def auth_projects\n projects =\n services.identity.auth_projects(@scoped_domain_id).sort_by(&:name)\n render json: { auth_projects: projects }\n end", "def index\n @food_products = FoodProduct.where(group_id: @groups)\n end", "def view_product\n to_json(\n only: [:id, :title, :description, :key_information],\n methods: [:photo_url, :net_mrp, :mrp_per_unit, :quantity],\n :include => {\n store: {\n only: [:name, :id],\n methods: [:full_address]\n }\n }\n )\n end", "def new\n\n #@product = Product.new\n @product = current_user.products.build if signed_in?\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product }\n end\n end", "def index\n\n#client = Google::APIClient.new\n\n@products = Product.paginate(:page => params[:page], :per_page => 30)\n@manufacturers = Product.uniq.pluck(:manufacturer)\n@categories = Product.uniq.pluck(:type_b)\n@sub_categories = Product.uniq.pluck(:sub_type)\n\nend", "def products_overview\n\n @products_quantities = {}\n @boxes = Box.all\n\n @boxes.each do |box|\n quantities = GroupBoxContent.sum(:quantity,group: :product_id, conditions:{\"group_boxes.box_id\" => box.id},joins: :box)\n quantities.delete_if {|key, value| value == 0 }\n\n products = Product.all\n products.each do |product|\n @products_quantities[product] ||= {}\n @products_quantities[product][box] ||= quantities[product.id]\n end\n end\n\n respond_to do |format|\n format.html # products_overview.html.erb\n format.json { render json: @groups }\n end\n end", "def index\n b_admin = current_user.admin? 
rescue false\n @products = Product.filter_by_params(b_admin, params)\n #@products = Product.available\n \n @title = Product.page_description(params)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @products }\n end\n end", "def index\n @host_groups = HostGroup.with_permissions_to(:show)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @host_groups }\n end\n end", "def index\n @groups_with_el = Provider.pluck(:group_id).uniq - [nil, 0]\n \n @groups = Provider.where(is_group: true).order(:name).map{|g| {\n id: g.id, \n name: g.name,\n spec: g.spec,\n allow_delete: !@groups_with_el.include?(g.id),\n goods_types_array: g.goods_type_names_array.join(', ')\n }}\n\n store_providers_path\n end", "def index\n @user_interactive_products = UserInteractiveProduct.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @user_interactive_products }\n end\n end", "def product_images\n user_id, product = params[:user_id], params[:id]\n return bad_request if !user_id || !product\n # returns all images for a given user and product\n images = UserProduct.find_images(user_id, product)\n # create json array\n img = images ? images.collect { |i| i.js_serialize } : []\n render :json => img\n end", "def index\n @products = Product.all\n render json: @products\n end", "def get_products( session_key)\n response_xml = self.call( :get_products, message: {\n arg0: session_key\n })\n response = IssueCentre::Response.parse( response_xml)\n end", "def show\n if is_my_resource(@product.prossumer_id)\n render json: @product.as_json({\n cycle_id: params[:cycle_id],\n include: {\n prossumer: {\n except: [:encrypted_password, :salt, :confirm_hash]\n },\n product_category: {}\n }\n })\n end\n end", "def products\n Product.find_all_by_vendor_id(@id)\n end", "def show_cart\n render json: User.find(params[:id]).cart_products\n end", "def authorize_product!(product)\n unless product.nil?\n unless current_user.products.include?(product)\n raise Exceptions::ProductAccessDenied\n end\n end\n end", "def set_products_grupo\n @products_grupo = ProductsGrupo.find(params[:id])\n end", "def show\n #logger.info \"GroupsController Get Parameters: #{params}\"\n if @group\n render json: @group.to_json(:include => {:memberships => {:only => [:admin], :include => {:user => {:only => [:id, :first_name, :last_name, :email]}}}})\n else\n render json: {error: \"YOU MUST BE MEMBER OF THIS GROUP TO SEE IT\"}, status: :unprocessable_entity\n end\n end", "def harvest_products\n product_type = params[:product_type]\n package_type = params[:package_type]\n cultivation_batch_id = params[:cultivation_batch_id]\n\n catalogue = Inventory::Catalogue.find_by(label: product_type, category: 'raw_sales_product')\n cultivation_batch = Cultivation::Batch.find(cultivation_batch_id)\n facility = cultivation_batch.facility\n facility_strain = cultivation_batch.facility_strain\n\n product = Inventory::Product.find_by(\n facility: facility,\n facility_strain: facility_strain,\n catalogue: catalogue,\n package_type: package_type,\n )\n\n packages = Inventory::ItemTransaction.where(\n catalogue: catalogue,\n product: product,\n ).\n order(created_at: :desc)\n\n packages_json = packages.map do |x|\n {\n id: x.id.to_s,\n tag: x.package_tag,\n product_id: x.product.id.to_s,\n product_type: x.catalogue.label,\n package_type: x.product.package_type,\n event_type: x.event_type,\n }\n end\n\n render json: packages_json, status: 200\n end", "def admin_list\n @products = 
Product.find(:all)\n end", "def get_accessible_products\n return call('Product.get_accessible_products')\n end", "def product_params\n if @current_user.shops.where(id: params[:product][:shop_id]).first\n @product_params = params.require(:product).permit(:name,\n :description,\n :price,\n :rating,\n :shop_id,\n :category_id)\n true\n else\n render(json: { error: 'Not authorized' }, status: :unauthorized)\n false\n end\n end", "def show_group\n group_name = params[:name]\n @group = $iam.groups[group_name]\n @users = @group.users\n @policies = @group.policies\n end", "def index\n @groups = current_user.groups\n @groups = @groups.where('name LIKE ?', \"%#{params[:q]}%\") if params[:q].present?\n @groups = current_user.groups_sorted_by_admin_state_and_name(@groups)\n @groups_pictures = Group.group_images_hash_for_groups @groups\n\n respond_to do |format|\n format.html { render :index }\n format.json { render json: @groups }\n end\n end", "def show\n @product = Product.find(params[:id]) \n @admin_images = Image.admins_photos.where(product_id: @product.id).order(:title) || []\n @users_images = Image.users_photos.where(product_id: @product.id).order(:title) || []\n respond_to do |format|\n format.html\n format.js \n format.json { render json: @product }\n end\n end", "def get_groups\n @course = Course.find(params[:course_id])\n render json: @course.groups\n end", "def index\n @products = Product.all\n render json: {is_success: true, error_code: 200, message: \"Products Found Successfully\", result: @products}, status: 200\n end", "def add_product\n @user = User.find_by(id: params['user'])\n products = params['products'].split(',')\n products.each do |product_id|\n @user.cart.products << Product.find_by(id: product_id)\n end \n render json: {data: @user.cart.products }\n end", "def index\n \tproducts = Product.all\n \trender json: products\n \tend", "def index\n @occasions = current_user.occasions.sorteddesc.all.paginate(page: params[:page], :per_page => 10)\n @products = Product.all\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @occasions }\n end\n end", "def index\n @auth_shopping_cart_items = @auth_shopping_cart_item_class.find_cart_items({:resource => lookup_resource}).page 1\n respond_with @auth_shopping_cart_items\n end" ]
[ "0.77223885", "0.6924568", "0.6359968", "0.63030624", "0.6209846", "0.6191031", "0.60274035", "0.5961265", "0.5942114", "0.5941632", "0.5930532", "0.59239817", "0.5889648", "0.58429676", "0.5788566", "0.577226", "0.57682544", "0.57664394", "0.57484937", "0.5739547", "0.5729895", "0.5712798", "0.5711819", "0.5700632", "0.567235", "0.5665912", "0.56289184", "0.56221277", "0.56205547", "0.56176275", "0.5607604", "0.5604963", "0.5595955", "0.55661476", "0.55560344", "0.55469894", "0.55391383", "0.54970455", "0.54884493", "0.54802835", "0.54722184", "0.54712236", "0.546555", "0.54655004", "0.5464068", "0.54634964", "0.54561824", "0.5451043", "0.54477733", "0.54232264", "0.5418329", "0.54125655", "0.5412108", "0.54090625", "0.5398841", "0.5392242", "0.5391263", "0.5385351", "0.5385092", "0.5383012", "0.5380466", "0.5377039", "0.5376805", "0.53743756", "0.5369576", "0.53693664", "0.53588116", "0.53481394", "0.5346175", "0.53453565", "0.5322163", "0.5308744", "0.5303941", "0.53020054", "0.5294583", "0.5293306", "0.52853197", "0.5284288", "0.5283686", "0.52829", "0.52807045", "0.5269679", "0.52610683", "0.52596605", "0.5243072", "0.5240863", "0.52388704", "0.5234596", "0.5231508", "0.52302283", "0.5220657", "0.5218347", "0.521667", "0.5211791", "0.5208774", "0.52046955", "0.52014416", "0.5199832", "0.5198511", "0.51952136" ]
0.74003464
1
POST /groups/1/products_auths
POST /groups/1/products_auths.json
def create

  params[:state] = 1;
  @product_auth = ProductAuth.new(params.permit(:state, :product_id, :group_id, :ecos, :euros));
  if @product_auth.save
    render json: @product_auth
  else
    render json: @product_auth.errors
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index\n render json: ProductAuth.where({group_id: params[:group_id]})\n end", "def get_authorization_products_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: AuthorizationApi.get_authorization_products ...\"\n end\n \n # resource path\n local_var_path = \"/api/v2/authorization/products\".sub('{format}','json')\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # HTTP header 'Accept' (if needed)\n local_header_accept = ['application/json']\n local_header_accept_result = @api_client.select_header_accept(local_header_accept) and header_params['Accept'] = local_header_accept_result\n\n # HTTP header 'Content-Type'\n local_header_content_type = ['application/json']\n header_params['Content-Type'] = @api_client.select_header_content_type(local_header_content_type)\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n \n auth_names = ['PureCloud OAuth']\n data, status_code, headers = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'OrganizationProductEntityListing')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: AuthorizationApi#get_authorization_products\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def show\n # is_my_resource(params[:id])\n\n # prossumerProductsIds = Prossumer.find(params[:id]).products.ids\n render json: ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first.as_json(:include => :product)\n end", "def create_product_with_api\n user = User.find_by(authentication_token: params[:authentication_token])\n if user.present?\n product = user.products.new(product_params)\n if product.save\n json_response({success: true,data: {:product => product},message: \"Product is created successfully\"\n }, 201)\n else\n json_response({success: false,data: {:product => product},errors: product.errors.messages,message: \"Validation faild\"}, 422)\n end\n else\n json_response({\n success: false,\n message: \"Token is invalid\"\n }, 400)\n end\n end", "def create\n @product_product_group = ProductProductGroup.new(product_product_group_params)\n\n respond_to do |format|\n if @product_product_group.save\n format.html { redirect_to @product_product_group, notice: 'Product product group was successfully created.' 
}\n format.json { render :show, status: :created, location: @product_product_group }\n else\n format.html { render :new }\n format.json { render json: @product_product_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product_group = ProductGroup.new(product_group_params)\n\n respond_to do |format|\n if @product_group.save\n format.html { redirect_to @product_group, notice: \"Product group was successfully created.\" }\n format.json { render :show, status: :created, location: @product_group }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @product_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def create_product_group\n ProductGroup.create(:group_id => self.group_id, :product_id => self.product_id)\n return true\n end", "def create\n @product_group = ProductGroup.new(params[:product_group])\n\n respond_to do |format|\n if @product_group.save\n format.html { redirect_to @product_group, notice: 'Product group was successfully created.' }\n format.json { render json: @product_group, status: :created, location: @product_group }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def addProd()\n if(!authenticateAdmin(params[:admin_id], params[:admin_auth_key]))\n render json: {status: false, reason: \"Authentication Failed\", data: \"\"}\n return\n end\n p = Product.new(name: params[:name], price: params[:price].to_f, category_id: params[:cat_id], picture_list: '[]')\n status = p.save\n error = \"\"\n if(p.errors.full_messages.count > 0)\n error = c.errors.full_messages[0]\n end\n render json: {status: status, reason: error, data: \"\"}\n end", "def create\n @group_product = GroupProduct.new(group_product_params)\n\n respond_to do |format|\n if @group_product.save\n format.html { redirect_to @group_product, notice: 'Group product was successfully created.' 
}\n format.json { render :show, status: :created, location: @group_product }\n else\n format.html { render :new }\n format.json { render json: @group_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_product\n @user = User.find_by(id: params['user'])\n products = params['products'].split(',')\n products.each do |product_id|\n @user.cart.products << Product.find_by(id: product_id)\n end \n render json: {data: @user.cart.products }\n end", "def get_authorization_products(opts = {})\n data, _status_code, _headers = get_authorization_products_with_http_info(opts)\n return data\n end", "def create\n @product = Product.new(params[:product])\n respond_to do |format|\n if @product.save\n current_user.user_info.products.push @product\n Shopify.create @product\n format.html { redirect_to :action => 'index' }\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @products = Product.all\n order_products = []\n @products.each do |p|\n sym = \"product_#{p.id}\".to_sym\n if params[sym].present?\n count = params[sym].to_i\n if count > 0\n order_product = OrderProduct.new(product: p, count: count)\n order_products << order_product\n end\n end\n end\n\n if order_products.size > 0\n order = Order.new(user: current_user)\n order.save!\n order_products.each do |i|\n i.order = order\n i.save!\n end\n redirect_to order_path(order.id)\n else\n redirect_to new_order_path\n end\n end", "def update\n @product_auth = ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first\n\n if @product_auth.update(params.permit(:state))\n render json: @product_auth\n else\n render json: @product_auth.errors\n end\n end", "def create\n @product = Product.new(product_params)\n if current_user.is_provider\n @product.provider = current_user.provider\n end\n @product.user = @product.provider.user\n respond_to do |format|\n if @product.save\n if params.key?(:pictures) && params[:pictures].present?\n params[:pictures].each do |picture_file|\n @product.pictures.create(picture_file: picture_file)\n end\n end\n format.html { redirect_to admin_product_path(@product), notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n @product.user = current_api_v1_user\n respond_to do |format|\n if @product.save\n params[:product][:properties].try(:each) do |k,v|\n @product.product_properties.create(property: Property.find(k), value: v)\n end\n params[:product][:colors].try(:each) do |c|\n @product.colors.create(name: c[:name].downcase, code: c[:code])\n end\n params[:product][:photos].try(:each) do |c|\n @product.photos.create(image: c)\n end\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created}\n else\n format.html { render :new }\n format.json { render json: @product.errors.full_messages, status: :unprocessable_entity }\n end\n end\n end", "def create\n @products_grupo = ProductsGrupo.new(products_grupo_params)\n\n respond_to do |format|\n if @products_grupo.save\n format.html { redirect_to @products_grupo, notice: 'Products grupo was successfully created.' 
}\n format.json { render :show, status: :created, location: @products_grupo }\n else\n format.html { render :new }\n format.json { render json: @products_grupo.errors, status: :unprocessable_entity }\n end\n end\n end", "def postProductProvisioningAdvert( product_id, publisher_id, max_tags, max_locations)\n params = Hash.new\n params['product_id'] = product_id\n params['publisher_id'] = publisher_id\n params['max_tags'] = max_tags\n params['max_locations'] = max_locations\n return doCurl(\"post\",\"/product/provisioning/advert\",params)\n end", "def create\n @product = @collection.products.build(product_params)\n\n if @product.save\n render json: @product, status: :created#, location: @collection\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def product_product_group_params\n params.require(:product_product_group).permit(:product_id, :product_group_id)\n end", "def create\n project = Project.find(params[:project_id])\n if check_user_permission(project)\n @product = project.products.new(product_params)\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: \"Product was successfully created.\" }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end\n end", "def postProductProvisioningClaim( product_id)\n params = Hash.new\n params['product_id'] = product_id\n return doCurl(\"post\",\"/product/provisioning/claim\",params)\n end", "def create\n @product = @productable.products.find(params[:product_id])\n ids = params[:products]\n reals = @productable.products.map(&:id) # Make a lambda with this\n present_prod = ids.select { |n| reals.include?(n) } # And this\n if @product.update(product_relations: present_prod)\n render json: @product, status: 200\n else\n render json: @product.errors, status: 422\n end\n end", "def products_grupo_params\n params.require(:products_grupo).permit(:code, :name, :user_id)\n end", "def create\n #logger.info \"Post parameters: #{params}\"\n @group = Group.new(name: params[:group][:name], expiration: params[:group][:expiration], owner: current_user)\n if @group.save\n @group.memberships.create!(user: current_user, admin: true)\n if params[:group][:users]\n params[:group][:users].each do |u|\n @group.memberships.create!(user: User.where(\"id = ? OR email = ?\", u[:id], u[:email]).first, admin:u[:admin])\n end\n end\n render json: @group, status: :created, location: @group\n else\n render json: @group.errors, status: :unprocessable_entity\n end\n end", "def user_products\n @products = current_user.products\n\n respond_to do |format|\n format.html\n format.json { render json: @products }\n end\n end", "def create\n @product = Product.new(product_params)\n authorize @product\n respond_to do |format|\n if @product.save\n format.html { redirect_to products_path, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @product_product_groups = ProductProductGroup.all\n end", "def patch_params\n if params[\"product_group\"] and params[\"product_group\"][\"product_scopes_attributes\"].is_a?(Array)\n params[\"product_group\"][\"product_scopes_attributes\"] = params[\"product_group\"][\"product_scopes_attributes\"].group_by {|a| a[\"id\"]}.map do |scope_id, attrs|\n a = { \"id\" => scope_id, \"arguments\" => attrs.map{|a| a[\"arguments\"] }.flatten }\n if name = attrs.first[\"name\"]\n a[\"name\"] = name\n end\n a\n end\n end\n end", "def create\n #logger.info \"Post parameters: #{params}\"\n @group = Group.new(name: params[:group][:name], expiration: params[:group][:expiration])\n if @group.save\n params[:group][:users].each do |u|\n Membership.create(group: @group, user: User.where(\"id = ? OR email = ?\", u[:id], u[:email]).first, admin:u[:admin])\n end\n render json: @group, status: :created, location: @group\n else\n render json: @group.errors, status: :unprocessable_entity\n end\n end", "def create\n @feature_group = FeatureGroup.new(feature_group_params)\n @product = Product.find(params[:product_id])\n @feature_group.product_id = @product.id\n respond_to do |format|\n if @feature_group.save\n format.html { redirect_to @product, notice: 'Feature group was successfully created.' }\n format.json { render action: 'show', status: :created, location: @feature_group }\n else\n format.html { render action: 'new' }\n format.json { render json: @feature_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_end_products\n case params[:type]\n when \"full\"\n BGSService.end_product_data = BGSService.existing_full_grants\n when \"partial\"\n BGSService.end_product_data = BGSService.existing_partial_grants\n when \"none\"\n BGSService.end_product_data = BGSService.no_grants\n when \"all\"\n BGSService.end_product_data = BGSService.all_grants\n end\n\n redirect_to \"/test/users\"\n end", "def create\n # Avoid double provisioning: previous url would be \"/provision/new?apps[]=vtiger&organization_id=1\"\n session.delete('previous_url')\n\n @organization = current_user.organizations.to_a.find { |o| o.id && o.id.to_s == params[:organization_id].to_s }\n authorize! :manage_app_instances, @organization\n\n app_instances = []\n params[:apps].each do |product_name|\n app_instance = @organization.app_instances.create(product: product_name)\n app_instances << app_instance\n MnoEnterprise::EventLogger.info('app_add', current_user.id, 'App added', app_instance)\n end\n\n render json: app_instances.map(&:attributes).to_json, status: :created\n end", "def new\n @product_group = ProductGroup.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product_group }\n end\n end", "def create\n @product_tree_group = ProductTreeGroup.new product_tree_group_params\n\n respond_to do |format|\n if @product_tree_group.save\n format.html { redirect_to admin_product_tree_group_path(@product_tree_group), notice: '产品新建成功.' 
}\n format.json { render json: @product_tree_group, status: :created, location: @product_tree_group }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product_tree_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @products_color = ProductsColor.new(products_color_params)\n authorize @products_color\n\n respond_to do |format|\n if @products_color.save\n format.html { redirect_to products_path, notice: 'Products color was successfully created.' }\n format.json { render :show, status: :created, location: @products_color }\n else\n format.html { render :new }\n format.json { render json: @products_color.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @admin_product = Admin::Product.new(admin_product_params)\n\n respond_to do |format|\n if @admin_product.save\n format.html { redirect_to admin_products_url }\n format.json { render :show, status: :created, location: @admin_product }\n else\n format.html { render :new }\n format.json { render json: @admin_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @product_groups = ProductGroup.all\n end", "def create\n @product = Product.new(product_params)\n @product.user_id = current_user.id\n @product.location = current_user.location\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n\n if current_user.products.count == 5 && current_user.badges(name: \"Super Producer\").blank?\n Badge.create(user_id: current_user.id, name: \"Super Producer\")\n end\n\n if current_user.products.where(perishables: true).count == 5 && current_user.badges(name: \"Green Thumb\").blank?\n Badge.create(user_id: current_user.id, name: \"Green Thumb\")\n end\n end", "def create\n @product = current_user.products.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: \"Product was successfully created.\" }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def product_groups\n @product_groups ||= ProductGroupProxy.new(self)\n end", "def create\n @product = current_user.products.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_product_to_cart token, cart_id, product_code, qty = 1\n\n\t\tusername = \"[email protected]\" \n\t\tproduct_code = '3756505'\n\n\t\turl = 'http://localhost:9001/rest/v2/powertools/users/' + username + '/carts/' + cart_id.to_s + '/entries?code=' + product_code.to_s + '&qty=' + qty.to_s\n\t\tputs url\n\t\tclient = RestClient::Resource.new(url, :verify_ssl => OpenSSL::SSL::VERIFY_NONE)\n\n\t\tresponse = client.put(:Authorization => token )\n\n\t\tputs response.body\t\n\tend", "def create\n if user_signed_in?\n @product = current_user.products.new(product_params)\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end\n end", "def set_products_grupo\n @products_grupo = ProductsGrupo.find(params[:id])\n end", "def create\n newProduct = Product.new(products_params)\n if newProduct.save\n msg = { status: 201 , product: newProduct }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n else\n msg = { status: 422 }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end\n end", "def create\n # all_tags = params[:all_tags]\n if current_user.id == 1\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n # @product.all_tags = all_tags\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n else\n respond_to do |format|\n format.html { render :index }\n # format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = current_user.products.build(products_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Produkt utworzony pomyślnie' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @my_product = current_user.products.new(my_product_params)\n\n if @my_product.save\n render :show, status: :created\n else\n render json: @my_product.errors, status: :unprocessable_entity\n end\n end", "def create\n return unless product_params\n render json: Product.create_product(\n @product_params,\n category_list,\n @current_user.id\n ).simple_info, status: :created\n rescue => e\n render json: { error: e }, status: :bad_request\n end", "def index\n @products = Product.all.page(params[:page]).per(10)\n authorize Product\n end", "def create\n if params[:products]\n params[:products].each do |product|\n @product = Product.new(name: product[:name],\n brand: product[:brand],\n model: product[:model],\n sku: product[:sku],\n price: product[:price],\n desc: product[:desc])\n if [email protected]\n render json: @product.errors.full_messages, status: 422\n end\n end\n render 'api/products/index'\n else\n @product = Product.new(product_params)\n if @product.save\n render 'api/products/show'\n else\n render json: @product.errors.full_messages, status: 422\n end\n end\n end", "def product_params\n if @current_user.shops.where(id: params[:product][:shop_id]).first\n @product_params = params.require(:product).permit(:name,\n :description,\n :price,\n :rating,\n :shop_id,\n :category_id)\n true\n else\n render(json: { error: 'Not authorized' }, status: :unauthorized)\n false\n end\n end", "def create\n @product = Product.new(product_args)\n\n if @product.save\n render json: Product.all, status: :created\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def authorize_product!(product)\n unless product.nil?\n unless current_user.products.include?(product)\n raise Exceptions::ProductAccessDenied\n end\n end\n end", "def create\n @product = current_user.products.build(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = current_user.products.new(name: product_params[:name],description:product_params[:description],price:product_params[:price])\n @product.brand = Brand.find_by(brandName: product_params[:brand])\n @product.categories = Category.where(category:product_params[:category])\n respond_to do |format|\n if @product.save \n format.html { redirect_to new_product_location_path(@product), notice: \"Product was successfully created.\"}\n format.json { render json: @product}\n \n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n authorize! :manage, @product , :message => \"Access denied.\"\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def product_group_params\n params.require(:product_group).permit(:name, :title)\n end", "def products_aggregate\n quantities = GroupBoxContent.sum(:quantity,group: :product_id)\n quantities.delete_if {|key, value| value == 0 } \n\n products = Product.find(quantities.keys)\n @products_quantities = {}\n products.each do |product|\n @products_quantities[product] = quantities[product.id]\n end\n\n respond_to do |format|\n format.html # products_aggregate.html.erb\n format.json { render json: @groups }\n end\n end", "def create\n @product = Product.new(product_params)\n @product.categories.clear\n if params[:category_ids].present?\n params[:category_ids].each do |id| \n @product.categories << Category.find(id)\n end\n end\n \n @product.areas.clear\n if params[:area_ids].present?\n params[:area_ids].each do |id| \n @product.areas << Area.find(id)\n end\n end\n \n @product.articles.clear\n if params[:article_ids].present?\n params[:article_ids].each do |id| \n @product.articles << Article.find(id)\n end\n end\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to edit_admin_product_path(@product.id), notice: 'Product was successfully created.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def get_products_by_ids\n products_ids = params[:products_ids]\n products = Array.new\n JSON.parse(products_ids).each do |product_id|\n product = Product.find_by_id(product_id)\n if product\n products.push(product)\n end\n end\n render json: {products: serialized_products(products)}, status: :ok\n end", "def product_list\n current_user.products.order('name').collect {|p| [ p.name, p.ticket_project_id, p.id ]}\n end", "def create\n if current_user\n @order = current_user.orders.create(product_id: params[:product_id])\n redirect_to orders_path\n end\n end", "def create\n authorize! :manage, @product, :message => 'Not authorized as an administrator'\n @product = Product.new(params[:product])\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: 'Product was successfully created.' 
}\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @group = Group.find(params[:group_id])\n @purchase = @group.purchases.build(purchase_params)\n @purchase.user = current_user\n if @purchase.make_purchase(purchase_params[:item_ids])\n redirect_to group_purchase_path(@group, @purchase), notice: 'purchase was successfully created!'\n else\n render action: 'new'\n end\n end", "def deleteProd()\n if(!authenticateAdmin(params[:admin_id], params[:admin_auth_key]))\n render json: {status: false, reason: \"Authentication Failed\", data: \"\"}\n return\n end\n p = Product.find(params[:id])\n status = p.destroy\n error = \"\"\n if(p.errors.full_messages.count > 0)\n error = c.errors.full_messages[0]\n end\n render json: {status: true, reason: error, data: \"\"}\n end", "def object_params\n if params[\"product_group\"] and params[\"product_group\"][\"product_scopes_attributes\"].is_a?(Array)\n params[\"product_group\"][\"product_scopes_attributes\"] = params[\"product_group\"][\"product_scopes_attributes\"].group_by {|a| a[\"id\"]}.map do |scope_id, attrs|\n a = { \"id\" => scope_id, \"arguments\" => attrs.map{|a| a[\"arguments\"] }.flatten }\n if name = attrs.first[\"name\"]\n a[\"name\"] = name\n end\n a\n end\n end\n params[\"product_group\"]\n end", "def create\n @group = Group.new(group_params)\n token = params[:token]\n\n # use the user login instance and match emails to find current user\n @user_login = UserLogin.where(token: token).take\n @current_user = User.where(email: @user_login.email).take\n\n respond_to do |format|\n if @group.save\n\n # create a new group membership for new group w/ current user as admin\n @new_membership = GroupMembership.create(group_id: @group.id, user_id: @current_user.id, is_admin: true)\n\n # associate new membership with the group and the user\n @group.group_memberships << @new_membership\n @current_user.group_memberships << @new_membership\n\n format.html { redirect_to group_path(:id => @group.id), notice: 'Group was successfully created.' }\n format.json { render :show, status: :created, location: @group }\n else\n format.html { render :new }\n format.json { render json: @group.errors, status: :unprocessable_entity }\n end\n end\n end", "def products_path\n \tmain_app.send(Auth::OmniAuth::Path.create_or_index_path(Auth.configuration.product_class))\n end", "def add_product_to_wardrobe\n if current_user.products.find_by_id(params[:product_id]) == nil\n\n current_user.products << Product.find(params[:product_id])\n product = search_by_id(params[:product_id])\n\n render json: {status: 0, data: {product: product}}\n else\n\n render json: {status: 1, data: nil}\n end\n\n end", "def create\n @product_management = ProductManagement.new(product_management_params)\n\n if @product_management.save\n render json: @product_management, status: :created, location: @product_management\n else\n render json: @product_management.errors, status: :unprocessable_entity\n end\n end", "def products\n request :public, :get, :products\n end", "def check_permissions\n authorize! :create, Product\n end", "def index\n @group_products = GroupProduct.all\n end", "def find_products\n\n product_ids = session[:compare_products] || []\n if product_ids.length > 4\n flash[:notice] = I18n.t('compare_products.limit_is_4')\n product_ids = product_ids[0..3]\n elsif product_ids.length < 1\n flash[:error] = I18n.t('compare_products.insufficient_data')\n redirect_to \"/t/#{@taxon.permalink}\"\n end\n @products = Spree::Product.find(:all, :conditions => { :id => product_ids},\n :include => { :product_properties => :property },\n :limit => 4)\n end", "def set_product_group\n @product_group = ProductGroup.find(params[:id])\n end", "def harvest_products\n product_type = params[:product_type]\n package_type = params[:package_type]\n cultivation_batch_id = params[:cultivation_batch_id]\n\n catalogue = Inventory::Catalogue.find_by(label: product_type, category: 'raw_sales_product')\n cultivation_batch = Cultivation::Batch.find(cultivation_batch_id)\n facility = cultivation_batch.facility\n facility_strain = cultivation_batch.facility_strain\n\n product = Inventory::Product.find_by(\n facility: facility,\n facility_strain: facility_strain,\n catalogue: catalogue,\n package_type: package_type,\n )\n\n packages = Inventory::ItemTransaction.where(\n catalogue: catalogue,\n product: product,\n ).\n order(created_at: :desc)\n\n packages_json = packages.map do |x|\n {\n id: x.id.to_s,\n tag: x.package_tag,\n product_id: x.product.id.to_s,\n product_type: x.catalogue.label,\n package_type: x.product.package_type,\n event_type: x.event_type,\n }\n end\n\n render json: packages_json, status: 200\n end", "def obtains_product\n product = Product.find(params[:product_id])\n @product = product.user_id == @current_user.id ? product : nil\n (render(json: { e: 'AUTH' }, status: :unauthorized) && nil) if @product.nil?\n end", "def create_product(add_params = nil)\n params = {\n uid: uid,\n }\n api_call('/stores/:uid/products(.:format)',:post,params,add_params)\n end", "def set_product_product_group\n @product_product_group = ProductProductGroup.find(params[:id])\n end", "def create\n @product = Product.new(params[:product].merge :user_id => current_user.id)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to root_url, notice: 'Product was successfully created.'
}\n format.json { render json: @product, status: :created, location: @product }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def product_params\n params.require(:product).permit(:products)\n end", "def deleteProductProvisioning( product_id, gen_id)\n params = Hash.new\n params['product_id'] = product_id\n params['gen_id'] = gen_id\n return doCurl(\"delete\",\"/product/provisioning\",params)\n end", "def getTokenProduct( entity_id, product_id, language, portal_name, flatpack_id, source, channel, campaign)\n params = Hash.new\n params['entity_id'] = entity_id\n params['product_id'] = product_id\n params['language'] = language\n params['portal_name'] = portal_name\n params['flatpack_id'] = flatpack_id\n params['source'] = source\n params['channel'] = channel\n params['campaign'] = campaign\n return doCurl(\"get\",\"/token/product\",params)\n end", "def create\n @product_product = Product::Product.new(product_product_params)\n @product_product.user_created_id = current_user.id\n respond_to do |format|\n if @product_product.save\n @product_product.activar_producto\n format.html { redirect_to product_products_path, notice: I18n.t('products.controller.create') }\n format.json { render :show, status: :created, location: @product_product }\n else\n format.html { render :new }\n format.json { render json: @product_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = current_user.products.build(product_params)\n if @product.save\n flash[:success] = 'Contato criado com sucesso.'\n redirect_to user_products_path\n else\n render 'new'\n end\n end", "def create\n @product = Product.create!(product_params)\n json_response(@product, :created)\n end", "def create\n authorize! :create, @product\n @brands = Brand.all\n @categories = Category.all\n @product = Product.new(product_params)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to products_path, notice: 'El producto fue creado exitosamente.' }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @user = current_user\n @order = @user.order.new(order_params)\n respond_to do |format|\n if @order.save\n group_params['group_ids'].each do |g|\n gu = GroupUser.where(group_id: g)\n gu.each do |u|\n @u = User.find(u.user_id)\n @u.orders << Order.find(@order.id)\n end\n end\n\n format.html { redirect_to orders_path, notice: 'Order was successfully created.' 
}\n # format.json { render :show, status: :created, location: @order }\n else\n format.html { render :new }\n # format.json { render json: @order.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @product = Product.new(product_params)\n\n if params[:product][:store_ids].present?\n params[:product][:store_ids].each do |store_id|\n unless store_id.empty?\n store = Store.find(store_id)\n @product.stores << store\n end\n end\n end\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: \"Product was successfully created.\" }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def generate(groups)\n groups_params = groups.inject({}) do |params, (k, v)|\n params[\"groups[#{k}]\"] = 1\n params\n end\n\n response = RouteNGN.put self.class.base_url, {:id => self.id}.merge!(groups_params)\n response.success?\n end", "def index\n @products_colors = ProductsColor.all\n authorize ProductsColor\n end", "def new\n\n #@product = Product.new\n @product = current_user.products.build if signed_in?\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @product }\n end\n end", "def get_products\n products = response['products']\n products_ops = []\n products.each_value do |value|\n product = {}\n product[:sku] = value['sku']\n product[:product_family] = value['productFamily']\n product[:pricing_list_id] = pricing_list.id\n attributes = value['attributes']\n product[:service_code] = attributes['servicecode']\n product[:location] = attributes['location']\n product[:location_type] = attributes['locationType']\n product[:usage_type] = attributes['usagetype']\n product[:operation] = attributes['operation']\n product[:request_description] = attributes['requestDescription']\n product[:request_type] = attributes['requestType']\n product[:service_name] = attributes['servicename']\n product[:pricing_list_id] = pricing_list.id\n product[:created_at] = Time.zone.now.utc\n product[:updated_at] = Time.zone.now.utc\n products_ops << product\n end\n products_ops\n end", "def test_should_join_a_group_as_admin_via_API_JSON\r\n get \"/logout\"\r\n post \"/memberships.json\", :api_key => 'testapikey',\r\n :group_id => 1,\r\n :user_id => 1\r\n assert_response :created\r\n membership = JSON.parse(response.body)\r\n assert membership['user_id'] == 1, 'Incorrect user id'\r\n assert membership['group_id'] == 1, 'Incorrect group id'\r\n assert membership['role_id'].to_i == Role.find_by_rolename('group_admin').id, 'Incorrect role id' \r\n end", "def create\n @product = Product.new(product_params)\n\n #permitted_columns = params[:products_purposes_relations].permit(:product_id, :purpose_id, :stars)\n # @products_purposes_relation = @product.products_purposes_relations.create(permitted_columns)\n\n respond_to do |format|\n if @product.save\n format.html { redirect_to @product, notice: t('create_success') }\n format.json { render :show, status: :created, location: @product }\n else\n format.html { render :new }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n \n end", "def create\n @ordered_product = OrderedProduct.new(params[:ordered_product])\n rented_products = params['rented_products'] ? params['rented_products'] : {}\n # p product_packages\n respond_to do |format|\n format.html { redirect_to @ordered_product.complex_triggered_save(current_user.id, rented_products) }\n end\n end", "def import(products, user_auth, options)\n post_with_auth 'item/import',\n ImportResponse,\n products: products,\n user_auth: user_auth,\n options: options\n end" ]
[ "0.65927005", "0.59106016", "0.57836235", "0.57550025", "0.57131666", "0.5702658", "0.5685063", "0.56718564", "0.56688595", "0.56345135", "0.55863976", "0.5487059", "0.54579306", "0.5430024", "0.54161096", "0.538218", "0.53583395", "0.5358096", "0.5349153", "0.5323442", "0.5318176", "0.5317965", "0.52516747", "0.525125", "0.5234233", "0.522808", "0.5218726", "0.5210215", "0.52068245", "0.5190565", "0.51780236", "0.5162499", "0.5155087", "0.5153504", "0.51502806", "0.5144239", "0.5142566", "0.5133117", "0.5125361", "0.51225275", "0.5099035", "0.50878054", "0.50859106", "0.5082212", "0.5074034", "0.5067691", "0.5053861", "0.5049055", "0.50485486", "0.5048388", "0.5045705", "0.5044507", "0.5037569", "0.5032202", "0.5029062", "0.5021525", "0.5014353", "0.5009782", "0.50037444", "0.49984798", "0.49955383", "0.49873272", "0.4984629", "0.49839416", "0.49834907", "0.49743277", "0.49714577", "0.4970348", "0.49658236", "0.49643293", "0.49589697", "0.49387652", "0.49384022", "0.4933173", "0.49285156", "0.49246114", "0.49195158", "0.49190226", "0.491085", "0.4904924", "0.48864377", "0.48840514", "0.48838118", "0.48829877", "0.48828682", "0.48713058", "0.4870549", "0.48643535", "0.486118", "0.48564124", "0.48549077", "0.48525512", "0.48520678", "0.4851946", "0.4851649", "0.48498812", "0.48463246", "0.4845976", "0.48424911", "0.48413742" ]
0.64231145
1
PATCH/PUT /groups/1/products_auths/1
PATCH/PUT /groups/1/products_auths/1.json
def update
    @product_auth = ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first

    if @product_auth.update(params.permit(:state))
      render json: @product_auth
    else
      render json: @product_auth.errors
    end
  end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update\n @product_group = ProductGroup.find(params[:id])\n\n respond_to do |format|\n if @product_group.update_attributes(params[:product_group])\n format.html { redirect_to @product_group, notice: 'Product group was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def patch_params\n if params[\"product_group\"] and params[\"product_group\"][\"product_scopes_attributes\"].is_a?(Array)\n params[\"product_group\"][\"product_scopes_attributes\"] = params[\"product_group\"][\"product_scopes_attributes\"].group_by {|a| a[\"id\"]}.map do |scope_id, attrs|\n a = { \"id\" => scope_id, \"arguments\" => attrs.map{|a| a[\"arguments\"] }.flatten }\n if name = attrs.first[\"name\"]\n a[\"name\"] = name\n end\n a\n end\n end\n end", "def update\n respond_to do |format|\n if @product_group.update(product_group_params)\n format.html { redirect_to @product_group, notice: \"Product group was successfully updated.\" }\n format.json { render :show, status: :ok, location: @product_group }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @product_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @product_product_group.update(product_product_group_params)\n format.html { redirect_to @product_product_group, notice: 'Product product group was successfully updated.' }\n format.json { render :show, status: :ok, location: @product_product_group }\n else\n format.html { render :edit }\n format.json { render json: @product_product_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @group_product.update(group_product_params)\n format.html { redirect_to @group_product, notice: 'Group product was successfully updated.' }\n format.json { render :show, status: :ok, location: @group_product }\n else\n format.html { render :edit }\n format.json { render json: @group_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def editProd()\n if(!authenticateAdmin(params[:admin_id], params[:admin_auth_key]))\n render json: {status: false, reason: \"Authentication Failed\", data: \"\"}\n return\n end\n p = Product.find(params[:id])\n status = p.update(name: params[:name], price: params[:price].to_f, category_id: params[:cat_id])\n error = \"\"\n if(p.errors.full_messages.count > 0)\n error = c.errors.full_messages[0]\n end\n render json: {status: status, reason: error, data: \"\"}\n end", "def update\n respond_to do |format|\n if @products_grupo.update(products_grupo_params)\n format.html { redirect_to @products_grupo, notice: 'Products grupo was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @products_grupo }\n else\n format.html { render :edit }\n format.json { render json: @products_grupo.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = current_user.user_info.products.find(params[:id])\n @product = Product.find(params[:id]) if current_user.user_info.admin \n respond_to do |format|\n if @product.update_attributes(params[:product])\n Shopify.modify @product\n format.html { redirect_to :action => 'index' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def UpdateGroup params = {}\n \n APICall(path: 'groups.json',method: 'PUT',payload: params.to_json)\n \n end", "def update\n begin\n @api_v1_product.update!(api_v1_product_params)\n head :no_content\n rescue => ex\n json_response({error: ex.message}, :unprocessable_entity)\n end\n end", "def update\n updateProduct = Product.find_by_id(params[:id])\n updateProduct.update(products_params)\n if updateProduct != nil\n msg = { status: 200 , product: updateProduct }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n else\n msg = { status: 422 }\n respond_to do |format|\n format.html { render json: msg }\n format.json { render json: msg }\n end\n end\n end", "def update\n if check_user_permission(@product.project)\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: \"Product was successfully updated.\" }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end\n end", "def update\n group_ids = params[:group_id]\n org_id = params[:organization_id]\n @user = User.find_by_id(params[:user_id])\n current_group_ids = @user.user_group_ids\n\n respond_to do |format|\n if @user.update_attributes(full_name: params[:full_name], abbreviation: params[:abbreviation], email: params[:email], status: params[:status], staff_number: params[:employee_id], career_path: params[:career_path])\n\n is_logged = [email protected]_changes.blank?\n if current_group_ids != group_ids\n @user.user_group_ids = group_ids\n format.json { render json: @user }\n end\n else\n format.json { render json: @user.errors.messages, status: :unprocessable_entity }\n end\n end\n end", "def update\n #logger.info \"Put parameters: #{params.to_json}\"\n\n if @membership.admin \n if @group.update_attributes(params[:group])\n head :no_content\n else\n render json: @group.errors, status: :unprocessable_entity\n end\n else \n render json: {error: \"YOU MUST BE AN ADMINISTRATOR TO COMPLETE THIS ACTION\"}, status: :unprocessable_entity\n end \n end", "def update\n authorize! :manage, @product , :message => \"Access denied.\"\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @product.update(product_params)\n render json: @product, status: :ok#, location: @collection\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n logger.info \"Put parameters: #{params.to_json}\"\n @group = Group.find(params[:id])\n\n if @group.update_attributes(params[:group])\n head :no_content\n else\n render json: @group.errors, status: :unprocessable_entity\n end\n end", "def index\n render json: ProductAuth.where({group_id: params[:group_id]})\n end", "def update\n authorize! :update, CompetenceTierGroup\n \n @competence_tier_group.update!(competence_tier_group_params)\n render json: {status: :ok}\n end", "def update\n authorize @product\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to products_path, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n authorize\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: \"Product was successfully updated.\" }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n set_feature_group\n respond_to do |format|\n if @feature_group.update(feature_group_params)\n format.html { redirect_to @product, notice: 'Feature group was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @feature_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n authorize! :manage, @product, :message => 'Not authorized as an administrator'\n @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n authorize @product\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_harman_employee_pricing\n authorize! :update, :harman_employee_pricing\n params[:product_attr].to_a.each do |key, attr|\n product = Product.find(key)\n product.update_attributes(attr)\n end\n redirect_to(harman_employee_pricing_admin_products_path, notice: \"Pricing updated successfully.\")\n end", "def update_many\n respond_to do |format|\n if @products.update_all(product_params)\n format.html { redirect_to products_url, notice: 'Products were successfully updated.' 
}\n format.json { render :index, status: :ok, location: products_url }\n else\n format.html { render :index }\n format.json { render json: @products.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n authorize @product\n respond_to do |format|\n if @product.update(product_params)\n\n require 'mixpanel-ruby'\n tracker = Mixpanel::Tracker.new(ENV['MIXPANEL_PROJECT_TOKEN'])\n tracker.track(current_user.id, 'Product Update', @product.attributes)\n\n format.html { redirect_to @product, notice: I18n.t('products.update.success') }\n format.json { render :show, status: :ok, location: @product }\n else\n\n require 'mixpanel-ruby'\n tracker = Mixpanel::Tracker.new(ENV['MIXPANEL_PROJECT_TOKEN'])\n tracker.track(current_user.id, 'Product Update Failed', @product.attributes)\n\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product_tree_group = ProductTreeGroup.find(params[:id])\n\n respond_to do |format|\n if @product_tree_group.update_attributes product_tree_group_params\n format.html { redirect_to admin_product_tree_group_path(@product_tree_group), notice: '产品修改成功.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product_tree_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if (!product_params[:is_removal] && @product.update(product_args)) ||\n (product_params[:is_removal] && @product.destroy)\n render json: Product.all, status: :ok\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n respond_to do |format|\n if @api_v1_group_update.update(api_v1_group_update_params)\n format.html { redirect_to @api_v1_group_update, notice: 'Group update was successfully updated.' }\n format.json { render :show, status: :ok, location: @api_v1_group_update }\n else\n format.html { render :edit }\n format.json { render json: @api_v1_group_update.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n authorize! :update, @group\n @group.creator = current_user\n respond_to do |format|\n if @group.update(group_params)\n format.html { redirect_to @group, notice: 'Group was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n return unless product_params\n render json: @product.simple_info, status: :ok if @product.update!(@product_params)\n rescue => e\n render json: { error: e }, status: :ok\n end", "def update\n respond_to do |format|\n if @collection_group.update(collection_group_params)\n format.html { redirect_to @collection_group, notice: 'Collection group was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @collection_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n group = Group.find(params[:id])\n if group.update(group_params)\n render json: group\n else\n render json: group.errors.full_messages, status: :unprocessable_entity\n end\n end", "def update\n authorize @group\n respond_to do |format|\n if @group.update(group_params)\n format.html { redirect_to group_path(@group), notice: \"Group was successfully updated.\" }\n format.json { render :show, status: :ok, location: @group }\n else\n format.html { render :edit }\n format.json { render json: @group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n authorize! :update, @product\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @admin_product.update(admin_product_params)\n format.html { redirect_to admin_products_url }\n format.json { render :show, status: :ok, location: @admin_product }\n else\n format.html { render :edit }\n format.json { render json: @admin_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n authorize! :manage, @packages , :message => \"Access denied.\"\n respond_to do |format|\n if @package.update(package_params)\n format.html { redirect_to [@package.product, @package], notice: 'Package was successfully updated.' }\n format.json { render :show, status: :ok, location: @package }\n else\n format.html { render :edit }\n format.json { render json: @package.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product.update(product_params)\n set_products\n end", "def update\n respond_to do |format|\n if @supergroup.update(supergroup_params)\n format.html { redirect_to @supergroup, notice: \"#{supergroup.titlecase} was successfully updated.\" }\n format.json { render :show, status: :ok, location: @supergroup }\n else\n format.html { render :edit }\n format.json { render json: @supergroup.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product_product.user_updated_id = current_user.id\n respond_to do |format|\n if @product_product.update(product_product_params)\n format.html { redirect_to product_products_path, notice: I18n.t('products.controller.update') }\n format.json { render :show, status: :ok, location: @product_product }\n else\n format.html { render :edit }\n format.json { render json: @product_product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @asagroupservobj = Asagroupservobj.find(params[:id])\n\n respond_to do |format|\n if @asagroupservobj.update_attributes(params[:asagroupservobj])\n format.html { redirect_to @asagroupservobj, notice: 'Asagroupservobj was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @asagroupservobj.errors, status: :unprocessable_entity }\n end\n end\n end", "def product_product_group_params\n params.require(:product_product_group).permit(:product_id, :product_group_id)\n end", "def update\r\n respond_to do |format|\r\n if @agroup.update(agroup_params)\r\n format.html { redirect_to @agroup, notice: 'Agroup was successfully updated.' }\r\n format.json { head :no_content }\r\n else\r\n format.html { render action: 'edit' }\r\n format.json { render json: @agroup.errors, status: :unprocessable_entity }\r\n end\r\n end\r\n end", "def update\n token = params[:token]\n respond_to do |format|\n if @group.update(group_params)\n format.html { redirect_to group_path(:id => @group.id), notice: 'Group was successfully updated.' }\n format.json { render :show, status: :ok, location: @group }\n else\n format.html { render :edit }\n format.json { render json: @group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n render json: @sub_product.errors unless @sub_product.update(sub_product_params)\n end", "def update\n render json: @sub_product.errors unless @sub_product.update(sub_product_params)\n end", "def update_harman_employee_pricing\n authorize! :update, :harman_employee_pricing\n Array(params[:product_attr].to_unsafe_h).each do |key, attr|\n product = Product.find(key)\n product.update(attr)\n end\n redirect_to(harman_employee_pricing_admin_products_path, notice: \"Pricing updated successfully.\")\n end", "def test_should_update_group_user_via_API_JSON\r\n # lookup user's membership\r\n get \"memberships/find.json?api_key=testapikey&user_id=4&group_id=1\"\r\n membership = JSON.parse(response.body)\r\n membership_id = membership['id']\r\n assert membership_id == 3, 'Incorrect membership id'\r\n assert membership['role_id'] == Role.find_by_rolename('user').id, 'Incorrect role id'\r\n \r\n # promote user to group admin\r\n put \"/memberships/#{membership_id}.xml\", :api_key => 'testapikey',\r\n :membership => {:user_id => 4,\r\n :group_id => 1,\r\n :role_id => Role.find_by_rolename('group_admin') }\r\n assert_response :success\r\n end", "def set_product_group\n @product_group = ProductGroup.find(params[:id])\n end", "def update_product\n @product = current_user.products.active.find_by_code(params[:code])\n @product.update_attributes(params[:product])\n respond_to do |format|\n format.html { redirect_to profile_product_path(@product.code) }\n format.json { render json: @product, status: :created }\n end\n end", "def update_product\n @product = current_user.products.active.find_by_code(params[:code])\n @product.update_attributes(params[:product])\n respond_to do |format|\n format.html { redirect_to profile_product_path(@product.code) }\n format.json { render json: @product, status: :created }\n end\n end", "def update\n \n if params[:category_ids].present?\n @product.categories.clear\n params[:category_ids].each do |id| \n @product.categories << Category.find(id)\n end\n end\n \n @product.areas.clear\n if params[:area_ids].present?\n @product.areas.clear\n params[:area_ids].each do |id| \n @product.areas << Area.find(id)\n end\n end\n \n if params[:article_ids].present?\n @product.articles.clear\n params[:article_ids].each do |id| \n @product.articles << Article.find(id)\n end\n end\n \n # update status\n @product.status = nil if params[:product][:status].present?\n @product.status = params[:product][:status].join(\",\") if params[:product][:status].present?\n \n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to edit_admin_product_path(@product.id), notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_product_product_group\n @product_product_group = ProductProductGroup.find(params[:id])\n end", "def update\n if current_user.id == 1\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n else\n respond_to do |format|\n format.html { render :index }\n # format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update!(**args)\n @group_ids = args[:group_ids] if args.key?(:group_ids)\n @id = args[:id] if args.key?(:id)\n end", "def update\n authorize @products_color\n respond_to do |format|\n if @products_color.update(products_color_params)\n format.html { redirect_to products_path, notice: 'Products color was successfully updated.' }\n format.json { render :show, status: :ok, location: @products_color }\n else\n format.html { render :edit }\n format.json { render json: @products_color.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @product = @user.products.find(params[:id])\n # was @product = Product.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to user_products_url(@user), notice: 'El producto fue creado exitosamente.' }\n # format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n #Finding the specific chore where the id matches the one we pass in with the body\n @v1_chore = Chore.where(id: params[:id]).first\n #Here we're checking if we have user_id in our body, and if we do, we'll change the selected chore's properties\n #with the parameters of the body, we go through the specific group to our specific chore with the path\n if v1_chore_params[:user_id]\n @v1_chore.user_id = params[:user_id]\n @v1_chore.assigned = true\n if @v1_chore.save\n render :show, status: :ok\n end\n else\n render json: @v1_chore.errors, status: :unprocessable_entity\n end\n end", "def update\n @tasks_group = @tasks_group.do_before_update\n respond_to do |format|\n if @tasks_group.update(tasks_group_params)\n format.html { redirect_to @tasks_group, notice: 'Tasks group was successfully updated.'
}\n format.json { render :show, status: :ok, location: @tasks_group }\n else\n format.html { render :edit }\n format.json { render json: @tasks_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n\n if @group.update(group_params)\n head :no_content\n else\n render json: @group.errors, status: :unprocessable_entity\n end\n end", "def update\n @product = Product.find(params[:id])\n #@product.accessible = :all if admin?\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to quick_list_path, :notice => 'Product was successfully updated.' }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @group.update(group_params)\n format.json { head :no_content }\n else\n format.json { render json: @group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @product.update(product_params)\n render json: @product\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update!(**args)\n @kind = args[:kind] unless args[:kind].nil?\n @product_id = args[:product_id] unless args[:product_id].nil?\n @product_type = args[:product_type] unless args[:product_type].nil?\n @token = args[:token] unless args[:token].nil?\n end", "def update\n unread\n\n @product = Product.find(params[:id])\n @sellers = Seller.all\n @branches = Branch.all\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if params[:device_group][:property_ids]\n params[:device_group][:property_ids].each do |property_id|\n @property = Property.find(property_id)\n @device_group.properties << @property\n end\n end\n respond_to do |format|\n if @device_group.update(device_group_params)\n format.html { redirect_to @device_group, notice: 'Device group was successfully updated.' }\n format.json { render :show, status: :ok, location: @device_group }\n format.js {\n @device_groups = DeviceGroup.all\n render :update\n }\n else\n format.html { render :edit }\n format.json { render json: @device_group.errors, status: :unprocessable_entity }\n format.js {\n @device_groups = DeviceGroup.all\n render :update\n }\n end\n end\n end", "def update\n @group = Group.find(params[:id])\n if [email protected]?\n @user=User.find(@group.admin)\n puts \"///////group controller update//////////\"+ @user.to_json\n @user.update_attributes(role: '1')\n @user.update_attributes(group_id: @group._id)\n end\n @group.update_attributes(params[:group])\n #format.html { redirect_to @group, notice: 'Group was successfully updated.' 
}\n #format.json { render json: @group, status: :accepted, location: @group }\n render 'show'\n #format.html { render action: \"edit\" }\n #format.json { render json: @group.errors, status: :unprocessable_entity } \n end", "def set_group_product\n @group_product = GroupProduct.find(params[:id])\n end", "def update\n @product.assign_attributes object_params.reject{|_, v| v.blank?}\n # In a normal app we have a pre filled form of the object to update,\n # so when we do a PATCH (or PUT) we send all the attributes again,\n # in the API we permit to send any field to update, so we need to remove\n # all the blank params of the object to prevent validations triggers of\n # attributes that we don't send to update\n if @product.save\n render json: @product.to_json\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n @product = Product.find(params[:id])\n\n if @product.update(product_params)\n head :no_content\n else\n render json: @product.errors, status: :unprocessable_entity\n end\n end", "def update\n if params[:id].to_i == 0 and ['featured','clearance','whats_new'].include? params[:id]\n @products = Product.send(params[:id])\n @products.each do |product|\n product.send(params[:id] + '_position=', params[params[:id]].index(product.id.to_s))\n product.save\n end\n render :nothing => true and return\n end\n if params.has_key? :product\n @editable_params[:category_ids].uniq! if @editable_params.has_key? :category_ids\n end\n @product.send(params[:event]) if params.has_key? :event\n if params.has_key? :product and @product.update_attributes(@editable_params)\n @product_section = 'overview'\n render :template => 'manage/products/show' and return\n elsif !params.has_key? :product\n @product_section = 'overview'\n render :template => 'manage/products/show' and return\n else\n @product_section = 'info'\n render :template => 'manage/products/edit' and return\n end\n end", "def update\n models = params[:product][:models_attributes]\n if !models.nil?\n models.each do |model|\n model[1][:characteristics] = sanitize_attributes(model[1][:characteristics])\n end\n end\n\n clean_params = product_params\n clean_params[:specifications] = sanitize_data(clean_params[:specifications])\n clean_params[:features] = sanitize_data(clean_params[:features])\n clean_params[:attributes_titles] = sanitize_attributes(clean_params[:attributes_titles])\n\n @product.picture.destroy if params[:remove_picture] == '1'\n respond_to do |format|\n if @product.update(clean_params)\n format.html { redirect_to [:admin, @product], notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @model.update(model_params)\n \t\n# \t\tModelProduct.where(:model_id => @model.id).delete_all\n# \t \tunless params[:model_products].nil?\n# \t \t\tparams[:model_products].each do |product_id|\n# \t \t\t\tModelProduct.create(:model_id => @model.id, :product_id => product_id)\n# \t \t\tend\n# \t \tend\n\n format.html { redirect_to @model, notice: 'Model was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @model.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n # Find product by its id\n @product = Product.find(params[:product_id])\n # Find product stock by its id\n @products_stock = ProductsStock.find(params[:id])\n # authorize @products_stock\n if @products_stock.update(products_stock_params)\n # If update successful go to manager home page\n redirect_to partner_side_path\n else\n # If something goes wrong re-render products stock edit page\n render :edit\n end\n end", "def update\n @product_management = ProductManagement.find(params[:id])\n\n if @product_management.update(product_management_params)\n head :no_content\n else\n render json: @product_management.errors, status: :unprocessable_entity\n end\n end", "def update\n respond_to do |format|\n if @os_groups_principal.update(os_groups_principal_params)\n format.html { redirect_to @os_groups_principal, notice: 'Os groups principal was successfully updated.' }\n format.json { render :show, status: :ok, location: @os_groups_principal }\n else\n format.html { render :edit }\n format.json { render json: @os_groups_principal.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_group_with_http_info(group, group_oid, opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug 'Calling API: UserApi.update_group ...'\n end\n # verify the required parameter 'group' is set\n if @api_client.config.client_side_validation && group.nil?\n fail ArgumentError, \"Missing the required parameter 'group' when calling UserApi.update_group\"\n end\n # verify the required parameter 'group_oid' is set\n if @api_client.config.client_side_validation && group_oid.nil?\n fail ArgumentError, \"Missing the required parameter 'group_oid' when calling UserApi.update_group\"\n end\n # resource path\n local_var_path = '/user/groups/{group_oid}'.sub('{' + 'group_oid' + '}', group_oid.to_s)\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n header_params['X-UltraCart-Api-Version'] = @api_client.select_header_api_version()\n # HTTP header 'Accept' (if needed)\n header_params['Accept'] = @api_client.select_header_accept(['application/json'])\n # HTTP header 'Content-Type'\n header_params['Content-Type'] = @api_client.select_header_content_type(['application/json; charset=UTF-8'])\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = @api_client.object_to_http_body(group)\n auth_names = ['ultraCartOauth', 'ultraCartSimpleApiKey']\n data, status_code, headers = @api_client.call_api(:PUT, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names,\n :return_type => 'GroupResponse')\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: UserApi#update_group\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return data, status_code, headers\n end", "def update_products_details; end", "def update\n @product = Product.eager_loading.find(params[:id])\n\n respond_to do |format|\n if @product.update_attributes(params[:product])\n format.html { redirect_to shop_products_path(@product.shop.uuid), notice: 'Product was successfully updated.' 
}\n format.json { render json: @product.to_json(:include => {:product_variants => {:include => [:option_types,:pictures]}})}\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n\n params[:state] = 1;\n @product_auth = ProductAuth.new(params.permit(:state, :product_id, :group_id, :ecos, :euros));\n if @product_auth.save\n render json: @product_auth\n else\n render json: @product_auth.errors\n end\n end", "def update\n @angular = Product.find(params[:id])\n \n @angular.update_attributes(title: params[:products][:title], description: params[:products][:description])\n respond_to do |format|\n if @angular.valid?\n format.html { redirect_to store_index_path, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @angular.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @esol_group = EsolGroup.find(params[:id])\n\n respond_to do |format|\n if @esol_group.update_attributes(params[:esol_group])\n format.html { redirect_to @esol_group, notice: 'Esol group was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @esol_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @group.update_attributes(params[:group])\n respond_with(@group, only: [:id, :name, :creator_id, :admin_id])\n else\n render_error(404, request.path, 20103, \"Failed to update group info\")\n end\n end", "def update\n @group = Group.find(params[:id])\n @users = @group.users\n respond_to do |format|\n if @group.update_attributes(params[:group])\n format.html { redirect_to edit_user_registration_path, notice: 'Group was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @group.update(group_params)\n render_json_message({success: t('.success')}, 200)\n else\n render_json_message({errors: @group.errors.messages}, 422)\n end\n end", "def update\n respond_to do |format|\n if @gallery_group.update(gallery_group_params)\n format.html { redirect_to @gallery_group, notice: 'Gallery group was successfully updated.' }\n format.json { render :show, status: :ok, location: @gallery_group }\n else\n format.html { render :edit }\n format.json { render json: @gallery_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @grupo = Grupo.find(params[:id])\n authorize! :edit, @grupo\n\n respond_to do |format|\n if @grupo.update_attributes(params[:grupo])\n format.html { redirect_to @grupo, notice: 'Grupo was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @grupo.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @resource_group.update(resource_group_params)\n format.html { redirect_to @resource_group, notice: 'Resource group was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @resource_group }\n else\n format.html { render :edit }\n format.json { render json: @resource_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n # Solo el admin puede crear y editar productos, asique no tiene mucho sentido validar estas acciones\n # (se le da poder total al admin, no tiene sentido restringirlo)\n ProductCategory.create(product_id: @product.id, category_id: association_changes[:add_category]).id unless association_changes[:add_category].blank?\n ProductCategory.where(category_id: association_changes[:remove_category], product_id: @product.id).delete_all unless association_changes[:remove_category].blank?\n\n ProductTag.create(product_id: @product.id, tag_id: association_changes[:add_tag]).id unless association_changes[:add_tag].blank?\n ProductTag.where(tag_id: association_changes[:remove_tag], product_id: @product.id).delete_all unless association_changes[:remove_tag].blank?\n\n respond_to do |format|\n if @product.update(product_params)\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { render :show, status: :ok, location: @product }\n else\n format.html { render :edit }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @pagetitle = \"Edit product\"\n \n @product = Product.find(params[:id])\n @company = @product.company\n @suppliers = @company.get_suppliers()\n @marcas = @company.get_marcas()\n @modelos = @company.get_modelos()\n @categories = @company.get_categories() \n @unidades = Unidad.all\n\n respond_to do |format|\n if @product.update_attributes(products_params)\n format.html { redirect_to(@product, :notice => 'Product was successfully updated.') }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @product.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @product = Product.find(params[:id])\n\t\trespond_to do |format|\n\t\t if @product.update_attributes(params[:product])\n\t\t\tif @product.photo.nil?\n\t\t\t\tphoto = Photo.find_by_product_id(@product.id)\n\t\t\t\[email protected]_attributes(:photo_id => photo.id) if !photo.nil?\n\t\t\tend\n\t\t\tformat.html { redirect_to @product, :notice => 'Успешно обновлено' }\n\t\t\tformat.json { head :no_content }\n\t\t else\n\t\t\tformat.html { render :action => \"edit\" }\n\t\t\tformat.json { render :json => @product.errors, :status => :unprocessable_entity }\n\t\t end\n\t\tend\n end", "def update\n respond_to do |format|\n if @approval_group.update(approval_group_params)\n format.html { redirect_to @approval_group, notice: 'Approval group was successfully updated.' }\n format.json { render :show, status: :ok, location: @approval_group }\n else\n format.html { render :edit }\n format.json { render json: @approval_group.errors, status: :unprocessable_entity }\n end\n end\n end", "def show\n # is_my_resource(params[:id])\n\n # prossumerProductsIds = Prossumer.find(params[:id]).products.ids\n render json: ProductAuth.where({product_id: params[:id], group_id: params[:group_id]}).first.as_json(:include => :product)\n end", "def update\n @pgroup = Pgroup.find(params[:id])\n\n respond_to do |format|\n if @pgroup.update_attributes(params[:pgroup])\n format.html { redirect_to @pgroup, notice: 'Pgroup was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @pgroup.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @pgroup = Pgroup.find(params[:id])\n\n respond_to do |format|\n if @pgroup.update_attributes(params[:pgroup])\n format.html { redirect_to @pgroup, notice: 'Pgroup was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @pgroup.errors, status: :unprocessable_entity }\n end\n end\n end", "def update!(**args)\n @product_sets = args[:product_sets] if args.key?(:product_sets)\n end", "def update\n @product = Product.find(params[:id])\n @product.name_prefix = @product.name.first.upcase\n respond_to do |format|\n if @product.update_attributes(params[:product])\n\n format.html { redirect_to @product, notice: 'Product was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @group_mag = GroupMag.find(params[:id])\n\n respond_to do |format|\n if @group_mag.update_attributes(params[:group_mag])\n format.html { redirect_to @group_mag, notice: 'Group mag was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @group_mag.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @api_v1_group_field.update(api_v1_group_field_params)\n format.html { redirect_to @api_v1_group_field, notice: 'Group field was successfully updated.' }\n format.json { render :show, status: :ok, location: @api_v1_group_field }\n else\n format.html { render :edit }\n format.json { render json: @api_v1_group_field.errors, status: :unprocessable_entity }\n end\n end\n end" ]
[ "0.6573923", "0.65565145", "0.6440347", "0.64181745", "0.6389186", "0.6264017", "0.61744535", "0.61492085", "0.6121792", "0.60784936", "0.60574424", "0.6049499", "0.6029907", "0.6022605", "0.59897363", "0.59818065", "0.59238225", "0.5919891", "0.5898103", "0.58950806", "0.58732945", "0.58676463", "0.58635193", "0.5849404", "0.58452874", "0.58378196", "0.58323514", "0.58288056", "0.58252126", "0.58163065", "0.57907546", "0.57813543", "0.57727844", "0.57631016", "0.5755865", "0.5742693", "0.5720642", "0.5713528", "0.569945", "0.5675043", "0.56745327", "0.566268", "0.56584746", "0.5657955", "0.5655358", "0.56522787", "0.56522787", "0.56496036", "0.564582", "0.56404984", "0.5637608", "0.5637608", "0.5628229", "0.562524", "0.56244475", "0.56193596", "0.5613798", "0.56081945", "0.5607344", "0.56060964", "0.5605249", "0.5586608", "0.5585731", "0.55842316", "0.5582329", "0.55785084", "0.5577962", "0.5577088", "0.5575123", "0.55735964", "0.55676305", "0.556595", "0.55491334", "0.5547949", "0.5544561", "0.5543688", "0.55428135", "0.55422574", "0.55391896", "0.5537536", "0.5535029", "0.5531836", "0.55299526", "0.5524493", "0.55168706", "0.55106235", "0.55093503", "0.5506544", "0.54984546", "0.5496759", "0.54877174", "0.54844296", "0.54828364", "0.548017", "0.54762334", "0.54762334", "0.5475105", "0.5472697", "0.5472553", "0.5465812" ]
0.69279724
0
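
Note: the negative candidates in the record above all share the conventional Rails update-action shape: load the record, attempt the update, then branch on success inside a respond_to block. A minimal sketch of that shared pattern, assuming a hypothetical Widget model and widget_params strong-parameters helper (neither name comes from the data above):

    # Hypothetical Rails controller action showing the update pattern the
    # candidates above have in common. `Widget` and `widget_params` are
    # illustrative names only.
    def update
      @widget = Widget.find(params[:id])
      respond_to do |format|
        if @widget.update(widget_params)
          format.html { redirect_to @widget, notice: 'Widget was successfully updated.' }
          format.json { render :show, status: :ok, location: @widget }
        else
          format.html { render :edit }
          format.json { render json: @widget.errors, status: :unprocessable_entity }
        end
      end
    end
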
such getters and setters are impractical
def get_name; @name; end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set; end", "def set; end", "def zuruecksetzen()\n end", "def suivre; end", "def result_of_setting; end", "def mi_carrera\n\n\tend", "def setting; end", "def schubert; end", "def set_inventario\n end", "def set_rodzaj_pracownika\n @rodzaj_pracownika = RodzajPracownika.find(params[:id])\n end", "def solicitudes_atrasadas\n end", "def preencher\n nome.set 'leticia'\n \n end", "def changerEnRouge\n\t\t@couleur=1\n\tend", "def set_zgloszeny\n @zgloszeny = Zgloszenie.find(params[:id])\n end", "def terpene; end", "def set=(_arg0); end", "def private; end", "def set_zamowienia\n @zamowienia = Zamowienia.find(params[:id])\n end", "def set_koszyk\n @koszyk = Koszyk.find(params[:id])\n end", "def reset()\n @actual =@inicio\n end", "def specie; end", "def specie; end", "def specie; end", "def specie; end", "def set_mezosoic_era\n @mezosoic_era = MezosoicEra.find(params[:id])\n end", "def set_tubuyaki\n @tubuyaki = Tubuyaki.find(params[:id])\n end", "def set_situacao\n logger.debug \"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\"\n\n id_busca = params[:id]\n @os_id = params[:os_id]\n @os_tarefa = OsTarefa.find(id_busca)\n @os_tarefa.situacao=params[:situacao]\n @ordem_servico = OrdemServico.find(@os_id)\n logger.debug \"ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\"\n\n if @os_tarefa.situacao=='REJEITADA'\n @os_tarefa.ordem_servico_pagamento= nil\n @os_tarefa.situacao=OsTarefa.situacoes[2]\n else\n @os_tarefa.ordem_servico_pagamento= @ordem_servico\n @os_tarefa.situacao=OsTarefa.situacoes[0]\n end\n @os_tarefa.save\n respond_to do |format|\n\n format.json { head :no_content }\n format.js { render :layout => false }\n\n end\n end", "def set_cetak_blok\n @cetak_blok = CetakBlok.find(params[:id])\n end", "def changerEnVide\n\t\t@couleur=-1\n\tend", "def set_wypozyczeny\n @wypozyczeny = Wypozyczenie.find(params[:id])\n end", "def set_reentrada\n @reentrada = Reentrada.find(params[:id])\n end", "def set_nebulosa\n @nebulosa = Nebulosa.find(params[:id])\n end", "def konversiMenit(menit) \n # =================================\n # Area Kode Kode di sini\n \n \n \n # =================================\n end", "def set_f_dzakir\n @f_dzakir = FDzakir.find(params[:id])\n end", "def set_manteni\n @manteni = Manteni.find(params[:id])\n end", "def anular\n self.estado = Cancelacion::ESTADOS[:anulada]\n end", "def set_poznamka\n @poznamka = Poznamka.find(params[:id])\n end", "def set_opiskelija\n @opiskelija = Opiskelija.find(params[:id])\n end", "def set_zgloszenium\n @zgloszenium = Zgloszenium.find(params[:id])\n end", "def set_trabajo\n\t\t @trabajo = Trabajo.find(params[:id])\n\t\t end", "def set_vestibulando\n @vestibulando = Vestibulando.find(params[:id])\n end", "def set_ujumbe\n @ujumbe = Ujumbe.find(params[:id])\n end", "def verdi; end", "def set_reuniao\n @reuniao = Reuniao.find(params[:id])\n end", "def set_tenkenkekka\n @tenkenkekka = Tenkenkekka.find(params[:id])\n end", "def preencher\n cliente.set CONFIG['cliente']\n seleciona_Cliente.set CONFIG['idcliente']\n tipo_de_operacao.set CONFIG['tipooperacao'] \n cond_pagamento.set CONFIG['condpagamento']\n topico.set CONFIG['topico']\n tipo_venda.set CONFIG['tipovenda'] \n condicoes_de_frete.set CONFIG['condiFrete'] \n apelido.set CONFIG['apelido'] \n botaopesquisar.click\n\n end", "def set_usuario\n \n end", "def set_ton_giao\n @ton_giao = TonGiao.find(params[:id])\n end", "def set_nota_tecnica\n @nota_tecnica = NotaTecnica.find(params[:id])\n end", "def letzte_komponente\n \n end", "def set_setting\n end", "def set_klienci_jaskula\n @klienci_jaskula = 
KlienciJaskula.find(params[:id])\n end", "def data= data \n end", "def set_reciè\n @reciè = Reciè.find(params[:id])\n end", "def set_tiendas_juego\n @tiendas_juego = TiendasJuego.find(params[:id])\n end", "def set_xitongzhanghuguanli\n \n @xitongzhanghuguanli = Xitongzhanghuguanli.find(params[:id])\n end", "def set_muestra\n @muestra = Muestra.find(params[:id])\n end", "def set_muestra\n @muestra = Muestra.find(params[:id])\n end", "def set_tamire\n @tamire = Tamire.find(params[:id])\n end", "def set_faktury\n @faktury = Faktury.find(params[:id])\n end", "def set_familia\n \n @familia = Familia.find(params[:id]) if params[:id].to_i!=0 # si no es enterio=> no buscamos or id\n end", "def set_tipovestuario\n @tipovestuario = Tipovestuario.find(params[:id])\n end", "def set_pracownik\n @pracownik = Pracownik.find(params[:id])\n end", "def set_sintoma\n @sintoma = Sintoma.find(params[:id])\n end", "def sets\n end", "def rescatar\n self.estado_id = 1 #salida si lo creo, entrada/foliado si lo redirijo\n self.estado_id = 3 if self.trazas.count(:conditions => 'movimiento_id in (2,7,8)') > 0\n if self.origen.externo\n RAILS_DEFAULT_LOGGER.info \"devolviendo de origen externo #{self.origen.to_label} a #{self.documento.buzon}\"\n self.buzon_id = self.documento.buzon_id\n else\n self.buzon_id = self.origen_id\n end\n self.add_traza(current_user.id, 14, current_user.puesto.buzon_id)\n self.save!\n end", "def PodstawAn(ans) \n @RownanieWielomianu.each{ |wezel| \n if(ans.key?(wezel.Wspolczynnik))\n wezel.Wspolczynnik = ans[wezel.Wspolczynnik]\n end\n }\n end", "def set_zaduzenja\n @zaduzenja = Zaduzenja.find(params[:id])\n end", "def rossini; end", "def set_valor_ust\n @valor_ust = ValorUst.find(params[:id])\n end", "def set_substancia\n @substancia = Substancia.find(params[:id])\n end", "def set_detalle_gasto\n @detalle_gasto = DetalleGasto.find(params[:id])\n end", "def set\n false\n end", "def retire\n\n end", "def set_datosgenerale\n @datosgenerale = Datosgenerale.find(params[:id])\n end", "def set_ruolo\n @ruolo = Ruolo.find(params[:id])\n end", "def set_nombre(nombre) # la convencion de ruby es def nombre=(nombre) * Sin espacios\n @nombre = nombre\n end", "def set_restaurante\n\t\t@restaurante = Restaurante.find(params[:id])\t\n\tend", "def set_unidad_medida\n end", "def set_unidad_medida\n end", "def set_abastecimento\n @abastecimento = Abastecimento.find(params[:id])\n end", "def set_sabre_de_luz\n @sabre_de_luz = SabreDeLuz.find(params[:id])\n end", "def set_pelouro\n @pelouro = Pelouro.find(params[:id])\n end", "def set_koran\n @koran = Koran.find(params[:id])\n end", "def povuci_mrezu\n return @mreza\n end", "def set_kontrahenci\n #@kontrahenci = Kontrahenci.find(params[:id])\n @kontrahenci = Kontrahenci.joins(:opiekun).find(params[:id])\n end", "def set_zutaten\n @zutaten = Zutaten.find(params[:id])\n end", "def reiniciar\n reset()\n @mapaactual=@mapainicio\n\n end", "def set_tzeet\n @tzeet = Tzeet.find(params[:id])\n end", "def set_usua\n @usua = Usua.find(params[:id])\n end", "def set_mindicadorpf\n @registro = @mindicadorpf = Mindicadorpf.find(params[:id])\n end", "def set_grua\n @grua = Grua.find(params[:id])\n end", "def set_grua\n @grua = Grua.find(params[:id])\n end", "def set_detalle_orden_trabajo\n @detalle_orden_trabajo = DetalleOrdenTrabajo.find(params[:id])\n end", "def mutar(m)\n\t\t@valor = @valor.mutar(m)\n\tend", "def set_traslado\n @traslado = Traslado.find(params[:id])\n end", "def set_verfugbarkeit\n @verfugbarkeit = Verfugbarkeit.find(params[:id])\n end", "def set_tangazo\n 
@tangazo = Tangazo.find(params[:id])\n end", "def set_poliza\n @poliza = Poliza.find(params[:id])\n end", "def set_poliza\n @poliza = Poliza.find(params[:id])\n end", "def set_esjiaoben\n @esjiaoben = Esjiaoben.find(params[:id])\n end" ]
[ "0.70543814", "0.70543814", "0.6861453", "0.6693142", "0.6418386", "0.633405", "0.6044758", "0.60381496", "0.5949716", "0.5906479", "0.5890934", "0.58308923", "0.5818513", "0.58088523", "0.5793163", "0.57498306", "0.5741255", "0.573866", "0.57363963", "0.57179254", "0.5717189", "0.5717189", "0.5717189", "0.5717189", "0.5696516", "0.56820315", "0.56744015", "0.5658729", "0.56543094", "0.56342185", "0.563377", "0.5609481", "0.56081754", "0.56046766", "0.56027734", "0.55979544", "0.5593418", "0.5582061", "0.5577463", "0.55709946", "0.55696625", "0.5560978", "0.5557226", "0.5548774", "0.553941", "0.5534698", "0.5534588", "0.55319583", "0.5531255", "0.5529046", "0.5527658", "0.5526515", "0.55129784", "0.54935414", "0.5491029", "0.54901975", "0.54896295", "0.54896295", "0.5485589", "0.54841286", "0.5480129", "0.5473877", "0.5473809", "0.5473413", "0.54611886", "0.5459705", "0.54583114", "0.5456089", "0.5455807", "0.54537034", "0.5453313", "0.5447993", "0.54410195", "0.5437107", "0.5433935", "0.54325163", "0.5429623", "0.54293966", "0.542888", "0.542888", "0.5426417", "0.54253", "0.54145354", "0.5412741", "0.54124874", "0.54112166", "0.5410847", "0.54045886", "0.5402791", "0.54023266", "0.5400599", "0.53951997", "0.53951997", "0.53910714", "0.53891325", "0.5387478", "0.53866845", "0.53767246", "0.53738314", "0.53738314", "0.5373173" ]
0.0
-1
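
Note: the record above pairs the query with a hand-written reader method. In idiomatic Ruby the same intent is a one-liner; a minimal sketch using only core Ruby, with Person as an illustrative class name (not taken from the data):

    # Hand-rolled accessors in the style of the document above:
    class Person
      def get_name        # Ruby convention would be `name`, not `get_name`
        @name
      end

      def set_name(name)  # Ruby convention would be `name=(name)`
        @name = name
      end
    end

    # Idiomatic replacement: attr_accessor generates both methods.
    class Person
      attr_accessor :name # defines `name` and `name=`
    end

    person = Person.new
    person.name = 'Ada'
    person.name # => "Ada"
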
Execute a database transaction
def transaction; @database.transaction { yield self }; end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit_db_transaction\n execute(\"COMMIT\")\n end", "def commit_db_transaction\n execute(\"COMMIT\")\n end", "def begin_db_transaction\n execute(\"BEGIN\")\n end", "def begin_db_transaction\n execute(\"BEGIN\")\n end", "def begin_db_transaction\n @transaction = @connection.transaction('READ COMMITTED')\n end", "def commit_transaction(tx)\n tx.execute\n end", "def exec_rollback_db_transaction\n @connection.rollback\n @connection.autocommit = true\n end", "def commit_db_transaction() end", "def commit_db_transaction() end", "def commit_db_transaction\n @transaction = @connection.commit\n end", "def begin_db_transaction() end", "def begin_db_transaction() end", "def execute\n Trade.transaction do\n execute_without_transaction!\n end\n end", "def transaction\n begin\n if block_given?\n begin_db_transaction\n result = yield\n commit_db_transaction\n result\n end\n rescue Exception => database_transaction_rollback\n rollback_db_transaction\n raise\n end\n end", "def transaction(mode = :deferred, &block)\n @db.transaction(mode, &block)\n end", "def commit_db_transaction\n log('COMMIT', 'TRANSACTION') { @connection.commit }\n end", "def transaction(opts={}, &blk)\n Toshi.db.transaction(opts, &blk)\n end", "def rollback_db_transaction() end", "def rollback_db_transaction() end", "def commit_db_transaction\n @connection.commit\n @connection.autocommit = true\n end", "def transaction\n start_transaction!\n\n result = yield\n\n query 'COMMIT'\n\n result\n rescue\n query 'ROLLBACK'\n raise\n\n ensure\n end_transaction!\n end", "def exec_rollback_db_transaction\n log('ROLLBACK', 'TRANSACTION') { @connection.rollback }\n end", "def transaction; end", "def transaction; end", "def transaction; end", "def transaction(&block); end", "def begin\n db.transaction do\n yield\n end\n end", "def commit_db_transaction\n log('commit transaction', nil) { @connection.commit }\n end", "def begin_db_transaction\n @connection.autocommit = false\n end", "def transaction(&block)\n yield\n commit\n end", "def begin_db_transaction\n log('BEGIN', 'TRANSACTION') { @connection.begin }\n end", "def begin_db_transaction\n # PG driver doesn't really do anything on setAutoCommit(false)\n # except for commit-ing a previous pending transaction if any\n log('/* BEGIN */') { @connection.begin }\n end", "def transaction(&block)\n ActiveRecord::Base.transaction(&block)\n end", "def commit_transaction_sql\n SQL_COMMIT\n end", "def commit_transaction_sql\n SQL_COMMIT\n end", "def transaction(object)\n object.db.transaction {raise ::Sequel::Error::Rollback unless yield}\n end", "def commit()\n check_return_code(PureHailDB.ib_trx_commit(@trx_ptr))\n end", "def transaction\n start_transaction\n\n yield\n ensure\n end_transaction if transaction_started?\n end", "def call\n transaction do\n execute!\n\n confirm_success\n end\n\n rescue Exception => error\n confirm_failure error\n end", "def rollback_db_transaction\n execute(\"ROLLBACK\")\n end", "def rollback_db_transaction\n execute(\"ROLLBACK\")\n end", "def transaction(start_db_transaction=true)\n yield\n end", "def transaction\n start\n yield self\n rescue Object => ex\n rollback\n debug \"#{ex.class}: #{ex.message}\"\n ex.backtrace.each { |line| debug line }\n else\n commit\n end", "def commit_transaction(tx)\n tx.execute(self)\n end", "def transaction\n raise Mysql::Error, 'Not Connected' if @my.nil?\n\n if block_given?\n begin\n @my.query('START TRANSACTION WITH CONSISTENT SNAPSHOT')\n yield # Start executing the query black.\n @my.query('COMMIT')\n rescue Mysql::Error => e\n 
@my.query('ROLLBACK')\n raise e\n end\n end\n end", "def within_transaction; end", "def within_transaction; end", "def transaction(&block)\n self['AutoCommit'] = false\n self.do_transaction(&block)\n self['AutoCommit'] = true\n end", "def TransactionBegin()\n\[email protected](\"BEGIN\")\nend", "def transaction(&block)\n block.call\n end", "def begin_db_transaction\n log('begin transaction', nil) do\n begin_isolated_db_transaction(default_transaction_isolation)\n end\n end", "def transaction(&block)\n begin\n @store.transaction\n block.call(@store)\n @store.commit\n rescue SQLite3::Exception => exception\n raise \"SQLite exception: #{exception}\"\n end\n end", "def test_transactions(table=\"test_monetdb_transactions\", columndefs=['col1 INT', 'col2 VARCHAR(255)'])\n test_create_table(table, columndefs)\n \n data = [1, 'aa'] \n values = \"\"\n \n data.each do |d| values += '\\'' + d.to_s + '\\'' + ',' end\n values = values.chop # remove last ',' character \n \n insert = \"INSERT INTO \" + table + \" VALUES \" + \" ( \" + values + \" )\"\n \n @db.query('START TRANSACTION')\n @db.auto_commit(flag=false) # if @db.auto_commit?\n @db.query(insert)\n\n @db.query(\"COMMIT\") \n \n res = @db.query('SELECT * FROM ' + table)\n rows_committed = res.fetch_all\n res.free\n \n # create a save point\n @db.save\n @db.query(\"SAVEPOINT #{@db.transactions} ;\")\n \n @db.query(insert)\n \n # rollback to savepoint\n @db.query(\"ROLLBACK TO SAVEPOINT #{@db.transactions};\")\n @db.release\n \n res = @db.query('SELECT * FROM ' + table)\n rows_rolled_back = res.fetch_all\n res.free\n \n assert_equal(rows_committed, rows_rolled_back)\n \n # restore autocommit for remaining tests\n @db.auto_commit(flag=true) \n end", "def transaction\n raise Mysql2::Error, 2002 if @my.nil?\n\n if block_given?\n begin\n @my.query('START TRANSACTION WITH CONSISTENT SNAPSHOT')\n yield # Start executing the query black.\n @my.query('COMMIT')\n rescue Mysql2::Error => e\n @my.query('ROLLBACK')\n raise e\n end\n end\n end", "def start_transaction!\n fail DbMod::Exceptions::AlreadyInTransaction if @in_transaction\n @in_transaction = true\n\n query 'BEGIN'\n end", "def restart_transaction\n ActiveRecord::Base.connection.execute(\"COMMIT\")\n ActiveRecord::Base.connection.execute(\"BEGIN\")\n end", "def commit\n @db.commit\n end", "def begin_transaction\n return System.begin_transaction\n end", "def transaction(*sqls)\n begin\n db = SQLite3::Database.new(@@db_file)\n @@_set_db_handler.call(db)\n db.transaction do\n sqls.each do |sql|\n db.execute(sql)\n end\n end\n ensure\n db.close\n end\n end", "def with_transaction\n ActiveRecord::Base.transaction { yield }\n end", "def commit\n db_interface.commit\n end", "def trans\n\t\tbegin\n\t app = Aas::Application\n\t doc = app.DocumentManager.MdiActiveDocument\n\t db = doc.Database\n\t tr = doc.TransactionManager.StartTransaction\n\t yield tr, db\n\t\n\t tr.Commit\n\t tr.Dispose\n\t rescue Exception => e\n\t puts_ex e\n\t ensure\n\t tr.Dispose \n\t end\t\n\tend", "def transaction(&block)\n db\n persister\n\n result = nil\n start_time = Time.now\n begin\n db.transaction(:rollback => :reraise, :isolation => :repeatable,\n :retry_on => @retry_on_error, :num_retries => 3) do\n result = yield block\n end\n total = Time.now.to_ms - start_time.to_ms\n debug \"Transaction committed (#{total} ms)\"\n result\n rescue StandardError => e\n total = Time.now.to_ms - start_time.to_ms\n warn \"Transaction failed (#{total} ms)\"\n raise e\n ensure\n GC.start\n end\n end", "def transaction(options={}, &block)\n 
connection.transaction(options.update(:requires_new => true), &block)\n end", "def rollback_db_transaction\n @transaction = @connection.rollback\n end", "def commit\n IBM_DB.commit(@conn)\n end", "def transaction( &block )\n connect do | conn |\n conn.transaction do | conn |\n yield SqlRunner.new(SingleConnectionPool.new( conn ))\n end\n end\n end", "def commit\n # Nothing to do for an in memory database\n end", "def transaction(&block)\n yield\n end", "def transaction(&block)\n yield\n end", "def call\n db.transaction do\n _call_in_transaction\n end\n end", "def run_transaction(conn, op)\n retries = 0\n max_retries = 3\n while true\n retries += 1\n if retries == max_retries\n err = \"Transaction did not succeed after #{retries} retries\"\n raise err\n end\n\n begin\n op.call(conn)\n\n # If we reach this point, we were able to commit, so we break\n # from the retry loop.\n break\n\n rescue PG::TRSerializationFailure\n # This is a retry error, so we roll back the current\n # transaction and sleep for a bit before retrying. The\n # sleep time increases for each failed transaction.\n # conn.rollback\n puts \"EXECUTE SERIALIZATION_FAILURE BRANCH\"\n sleep_secs = (2**retries).floor\n puts \"Sleeping for #{sleep_secs} seconds\"\n sleep(sleep_secs)\n next\n end\n end\nend", "def checked_transaction(opts=OPTS)\n use_transaction?(opts) ? db.transaction({:server=>this_server}.merge!(opts)){yield} : yield\n end", "def checked_transaction(opts=OPTS)\n use_transaction?(opts) ? db.transaction({:server=>this_server}.merge!(opts)){yield} : yield\n end", "def within_transaction(object); end", "def transaction(&block)\n @in_transaction += 1\n begin\n yield self\n self.commit if @in_transaction > 0\n rescue => e\n self.rollback\n raise e\n ensure\n @in_transaction -= 1 unless @in_transaction == 0\n end\n end", "def transaction\n use do |connection|\n connection.transaction do |conn|\n begin\n yield conn\n rescue Rollback\n return\n end\n end\n end\n end", "def rollback_db_transaction\n log('rollback transaction', nil) { @connection.rollback }\n end", "def execute(&block)\n TempTableContext.with_context(db) {|context| execute_in_context(context, &block)}\n end", "def transaction(start_db_transaction = true)\n transaction_open = false\n begin\n if block_given?\n if start_db_transaction\n begin_db_transaction\n transaction_open = true\n end\n yield\n end\n rescue Exception => database_transaction_rollback\n if transaction_open\n transaction_open = false\n rollback_db_transaction\n end\n raise unless database_transaction_rollback.is_a? 
ActiveRecord::Rollback\n end\n ensure\n if transaction_open\n begin\n commit_db_transaction\n rescue Exception => database_transaction_rollback\n rollback_db_transaction\n raise\n end\n end\n end", "def transactions_to_db\n\n end", "def begin_isolated_db_transaction(isolation)\n @connection.transaction transaction_isolation_levels.fetch(isolation, isolation)\n end", "def commit\n query 'commit'\n self\n end", "def transaction(&block)\n raise InvalidDbError if @stale\n\n return transaction_in_staging(true, &block) if self.staging?\n\n begin\n transaction_in_staging(false, &block)\n ensure\n self.unstage\n end\n end", "def with_transaction(&block)\n base_model.transaction(&block)\n end", "def ddl_transaction(migration)\n if use_transaction?(migration)\n ActiveRecord::Base.transaction { yield }\n else\n yield\n end\n end", "def transaction\n sanity_check\n raise InterfaceError, \"No block given\" unless block_given?\n\n commit\n begin\n yield self\n commit\n rescue Exception\n rollback\n raise\n end\n end", "def perform( &block )\r\n @transaction_strategy.perform( &block )\r\n end", "def commit_transaction(conn, opts=OPTS)\n if in_savepoint?(conn)\n if supports_releasing_savepoints?\n log_connection_yield('Transaction.release_savepoint', conn){conn.release_savepoint(savepoint_obj(conn))}\n end\n else\n log_connection_yield('Transaction.commit', conn){conn.commit}\n end\n end", "def execute(*args)\n @db.execute(*args)\n end", "def transaction\n @pool.hold do |conn|\n @transactions ||= []\n if @transactions.include? Thread.current\n return yield(conn)\n end\n log_info(SQL_BEGIN)\n conn.execute(SQL_BEGIN)\n begin\n @transactions << Thread.current\n yield(conn)\n rescue Exception => e\n log_info(SQL_ROLLBACK)\n conn.execute(SQL_ROLLBACK)\n raise e unless Error::Rollback === e\n ensure\n unless e\n log_info(SQL_COMMIT)\n conn.execute(SQL_COMMIT)\n end\n @transactions.delete(Thread.current)\n end\n end\n end", "def commit_transaction(conn)\n log_info(TRANSACTION_COMMIT)\n conn.commit\n end", "def perform(&block)\n within_transaction do\n if before\n persist\n run_actions(&block)\n after\n rollback unless success?\n end\n end\n \n success?\n end", "def commit()\n #This is a stub, used for indexing\n end", "def commit; end", "def commit; end", "def commit; end", "def run(*args, &block)\n Sequel::Model.db.transaction(rollback: :always, auto_savepoint: true) { super }\n end", "def commit_transaction(conn)\n log_connection_execute(conn, commit_transaction_sql) unless Thread.current[:sequel_transaction_depth] > 1\n end", "def scaffold_transaction(&block)\n transaction(&block)\n end" ]
[ "0.8071028", "0.8071028", "0.79221094", "0.79221094", "0.7782967", "0.7707029", "0.7681444", "0.76744187", "0.76744187", "0.76149297", "0.7599078", "0.7599078", "0.75765014", "0.7435153", "0.73936146", "0.7392829", "0.7390553", "0.7375686", "0.7375686", "0.73635817", "0.73450476", "0.7339204", "0.73250866", "0.73250866", "0.73250866", "0.7318596", "0.7315119", "0.7276416", "0.7252883", "0.72455126", "0.7207249", "0.7181406", "0.7137927", "0.71115893", "0.71115893", "0.71093196", "0.7094643", "0.70924145", "0.7086532", "0.7056035", "0.7056035", "0.7045168", "0.7019572", "0.70088226", "0.6994317", "0.69880885", "0.69880885", "0.6983531", "0.6967523", "0.69587237", "0.69550127", "0.69186884", "0.6910862", "0.6882914", "0.6877122", "0.68684065", "0.6850407", "0.68353426", "0.68274796", "0.67915857", "0.67851484", "0.6784826", "0.67591894", "0.6758423", "0.6750754", "0.6730467", "0.6686656", "0.6678911", "0.6667907", "0.6667907", "0.66611445", "0.66560155", "0.6655317", "0.6655317", "0.6650222", "0.6620304", "0.6616153", "0.6612684", "0.6602336", "0.66023", "0.6595709", "0.6563664", "0.6561789", "0.6558245", "0.6555592", "0.6548363", "0.6548291", "0.653746", "0.65278834", "0.6519604", "0.65140015", "0.6511924", "0.6511911", "0.6509026", "0.64934677", "0.64934677", "0.64934677", "0.64819634", "0.64802134", "0.6477169" ]
0.7256859
28
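
Note: the document in the record above wraps a block in a database transaction and yields the wrapper back to the caller. A minimal usage sketch, assuming @database behaves like a SQLite3::Database (whose transaction method commits on normal exit and rolls back if the block raises) and assuming the wrapper also exposes an execute method; db, the table, and the SQL are illustrative:

    # Hypothetical usage of the wrapper above: both statements commit
    # together, or neither does if an exception is raised in the block.
    db.transaction do |wrapper|
      wrapper.execute "INSERT INTO accounts (name, balance) VALUES ('alice', 100)"
      wrapper.execute "UPDATE accounts SET balance = balance - 10 WHERE name = 'alice'"
    end
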
Optimize database internal structure
def optimize; @database.execute 'VACUUM'; end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dbsize; end", "def dbsize; end", "def database_bloat\n data = select(<<-SQL, \"Database Bloat\")\n SELECT tablename AS table_name\n , ' ' AS index_name\n , reltuples::bigint AS rows\n , relpages::bigint AS pages\n , otta\n , ROUND(CASE WHEN otta = 0 OR sml.relpages = 0 OR sml.relpages = otta THEN 0.0 ELSE sml.relpages / otta::numeric END, 1) AS percent_bloat\n , CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wasted_pages\n , CASE WHEN relpages < otta THEN 0 ELSE (blocksize * (relpages - otta))::bigint END AS wasted_size\n , CASE WHEN relpages < otta THEN 0 ELSE blocksize * (sml.relpages - otta)::bigint END AS wasted_bytes\n FROM ( SELECT schemaname\n , tablename\n , cc.reltuples\n , cc.relpages\n , blocksize\n , CEIL((cc.reltuples * ((datahdr + pagesize - (CASE WHEN datahdr % pagesize = 0 THEN pagesize\n ELSE datahdr % pagesize END)) + nullhdr2 + 4)) / (blocksize - 20::float)\n ) AS otta\n FROM ( SELECT pagesize\n , blocksize\n , schemaname\n , tablename\n , (datawidth + (hdr + pagesize - (CASE WHEN hdr%pagesize = 0 THEN pagesize\n ELSE hdr%pagesize END)))::numeric AS datahdr\n , (maxfracsum * (nullhdr + pagesize - (CASE WHEN nullhdr % pagesize = 0 THEN pagesize\n ELSE nullhdr % pagesize END))) AS nullhdr2\n FROM ( SELECT schemaname\n , tablename\n , hdr\n , pagesize\n , blocksize\n , SUM((1 - null_frac) * avg_width) AS datawidth\n , MAX(null_frac) AS maxfracsum\n , hdr + ( SELECT 1 + count(*) / 8\n FROM pg_stats s2\n WHERE null_frac <> 0\n AND s2.schemaname = s.schemaname\n AND s2.tablename = s.tablename\n ) AS nullhdr\n FROM pg_stats s\n , ( SELECT\n ( SELECT current_setting('block_size')::numeric) AS blocksize\n , CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\"[0-9]+.[0-9]+#\"%' for '#')\n IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr\n , CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS pagesize\n FROM ( SELECT version() AS v) AS foo\n ) AS constants\n GROUP BY 1, 2, 3, 4, 5\n ) AS foo\n ) AS rs\n JOIN pg_class cc\n ON cc.relname = rs.tablename\n JOIN pg_namespace nn\n ON cc.relnamespace = nn.oid\n AND nn.nspname = rs.schemaname\n AND nn.nspname <> 'information_schema'\n ) AS sml\n WHERE schemaname = 'public'\n\n UNION\n\n SELECT tablename AS table_name\n , iname AS index_name\n , ituples::bigint AS rows\n , ipages::bigint AS pages\n , iotta AS otta\n , ROUND(CASE WHEN iotta = 0 OR ipages = 0 OR ipages = iotta THEN 0.0 ELSE ipages / iotta::numeric END, 1) AS percent_bloat\n , CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wasted_pages\n , CASE WHEN ipages < iotta THEN 0 ELSE (blocksize * (ipages - iotta))::bigint END AS wasted_size\n , CASE WHEN ipages < iotta THEN 0 ELSE blocksize * (ipages - iotta) END AS wasted_bytes\n\n FROM ( SELECT schemaname\n , tablename\n , cc.reltuples\n , cc.relpages\n , blocksize\n , CEIL((cc.reltuples * ((datahdr + pagesize - (CASE WHEN datahdr % pagesize = 0 THEN pagesize\n ELSE datahdr % pagesize END)) + nullhdr2 + 4)) / (blocksize - 20::float)\n ) AS otta\n , COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples, 0) AS ituples, COALESCE(c2.relpages, 0) AS ipages\n , COALESCE(CEIL((c2.reltuples * (datahdr - 12)) / (blocksize - 20::float)), 0) AS iotta\n FROM ( SELECT pagesize\n , blocksize\n , schemaname\n , tablename\n , (datawidth + (hdr + pagesize - ( CASE WHEN hdr%pagesize = 0 THEN pagesize\n ELSE hdr%pagesize END)))::numeric AS datahdr\n , (maxfracsum * (nullhdr + pagesize - ( CASE WHEN nullhdr % pagesize = 0 THEN pagesize\n ELSE nullhdr % pagesize END))) AS nullhdr2\n FROM ( 
SELECT schemaname\n , tablename\n , hdr\n , pagesize\n , blocksize\n , SUM((1 - null_frac) * avg_width) AS datawidth\n , MAX(null_frac) AS maxfracsum\n , hdr + ( SELECT 1 + count(*) / 8\n FROM pg_stats s2\n WHERE null_frac <> 0\n AND s2.schemaname = s.schemaname\n AND s2.tablename = s.tablename\n ) AS nullhdr\n FROM pg_stats s\n , ( SELECT\n ( SELECT current_setting('block_size')::numeric) AS blocksize\n , CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\"[0-9]+.[0-9]+#\"%' for '#')\n IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr\n , CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS pagesize\n FROM ( SELECT version() AS v) AS foo\n ) AS constants\n GROUP BY 1, 2, 3, 4, 5\n ) AS foo\n ) AS rs\n JOIN pg_class cc\n ON cc.relname = rs.tablename\n JOIN pg_namespace nn\n ON cc.relnamespace = nn.oid\n AND nn.nspname = rs.schemaname\n AND nn.nspname <> 'information_schema'\n LEFT JOIN pg_index i\n ON indrelid = cc.oid\n LEFT JOIN pg_class c2\n ON c2.oid = i.indexrelid\n ) AS sml\n WHERE schemaname = 'public'\n ORDER BY 1, 2\n SQL\n\n integer_columns = %w(\n otta\n pages\n pagesize\n rows\n wasted_bytes\n wasted_pages\n wasted_size\n )\n\n float_columns = %w(\n percent_bloat\n )\n\n data.each do |datum|\n integer_columns.each { |c| datum[c] = datum[c].to_i }\n float_columns.each { |c| datum[c] = datum[c].to_f }\n end\n\n data.to_a\n end", "def orm; end", "def load_physical_schema(conn, builder)\n builder.indexes{\n conn.tables.each{|table|\n conn.indexes(table).each_pair{|name, defn|\n next if defn[:unique]\n builder.index(name, {:relvar => table, :attributes => defn[:columns]})\n }\n }\n }\n end", "def create_stats_tbl\n tblName = \"#{@table}_stat\"\n creationQuery = \"select ''::text as key, ''::text as value from t_result where 1 =2\"\n # puts creationQuery\n DBConn.tblCreation(tblName, 'key', creationQuery)\n\n parseTree = @parseTree\n\n # fromPT = parseTree['SELECT']['fromClause']\n originalTargeList = parseTree['SELECT']['targetList']\n # fields = DBConn.getAllRelFieldList(fromPT)\n keyList = []\n valueList = []\n selectList = []\n pkJoinList = []\n \n pkArray = @pkList.split(',').map { |col| col.delete(' ') }\n pkArray.each do |pkcol|\n originalTargeList.each do |targetCol|\n targetField = targetCol['RESTARGET']['val']['COLUMNREF']['fields']\n if targetField.count > 1 && targetField[1].to_s == pkcol\n pkJoinList << \"t.#{pkcol} = #{targetField[0]}.#{targetField[1]}\"\n pkArray.delete(pkcol)\n end\n end\n end\n\n stats = {\n \"min\": {\"func\": \"min($COLUMN)\", \"type\": \"text\" },\n \"max\": {\"func\": \"max($COLUMN)\", \"type\": \"text\" },\n \"count\": {\"func\": \"count($COLUMN)\", \"type\": \"int\" },\n \"dist_count\": {\"func\": \"count(distinct $COLUMN)\", \"type\": \"int\" }\n }\n @all_cols.each do |field|\n # puts field.colname\n rel_alias = field.relalias\n stats.each do |stat, info|\n # SELECT\n # UNNEST(ARRAY['address_id_max','address_id_min']) AS key,\n # UNNEST(ARRAY[max(address_id),min(address_id)]) AS value\n # FROM address\n # only add N(umeric) and D(ate) type fields\n if %w(N D).include? 
field.typcategory\n keyList << \"'#{field.relname}_#{field.colname}_#{stat}'\"\n value = info[:func].gsub('$COLUMN',\"result.#{field.relname}_#{field.colname}\")\n # if info[:type] == 'text'\n value = \"#{value}::text\"\n # end\n valueList << value\n # valueList << \"#{stat}(result.#{field.relname}_#{field.colname})::text\"\n end\n end\n selectList << \"#{rel_alias}.#{field.colname} as #{field.relname}_#{field.colname} \"\n\n # construnct pk join cond\n if pkArray.include?(field.colname)\n pkJoinList << \"#{@table}.#{field.colname} = #{rel_alias}.#{field.colname}\"\n end\n end\n\n # # remove the where clause in query and replace targetList\n whereClauseReplacement = []\n selectQuery = ReverseParseTree.reverseAndreplace(parseTree, selectList.join(','), whereClauseReplacement)\n resultQuery = %(with result as (#{selectQuery} join #{@table} on #{pkJoinList.join(' AND ')}))\n newTargetList = \"UNNEST(ARRAY[#{keyList.join(',')}]) AS key, UNNEST(ARRAY[#{valueList.join(',')}]) as value\"\n\n newQuery = %(#{resultQuery} SELECT #{newTargetList} FROM result)\n query = %(INSERT INTO #{tblName} #{newQuery})\n # puts query\n DBConn.exec(query)\n end", "def single_object_db; end", "def data_complextest(db); end", "def initialize_db\n create_indexes_for_all_models\n end", "def rebuild(table); end", "def save\n unless @added.empty? && @deleted.empty?\n # We cannot reuse the allocated space, since the data\n # that is copied would be destroyed.\n if polymorphic?\n offset = @database.allocate_polymorphic_join_elements(@size)\n else\n offset = @database.allocate_join_elements(@size)\n end\n pairs =\n @size.times.map do |index|\n rod_id = id_for(index)\n if rod_id.is_a?(Model)\n object = rod_id\n if object.new?\n if polymorphic?\n object.reference_updaters <<\n ReferenceUpdater.for_plural(self,index,@database)\n else\n object.reference_updaters <<\n ReferenceUpdater.for_plural(self,index,@database)\n end\n next\n else\n rod_id = object.rod_id\n end\n end\n [rod_id,index]\n end.compact\n if polymorphic?\n pairs.each do |rod_id,index|\n class_id = (rod_id == 0 ? 
0 : class_for(index).name_hash)\n @database.set_polymorphic_join_element_id(offset,index,rod_id,class_id)\n end\n else\n pairs.each do |rod_id,index|\n @database.set_join_element_id(offset,index,rod_id)\n end\n end\n @offset = offset\n @added.clear\n @deleted.clear\n @map.clear\n @original_size = @size\n end\n @offset\n end", "def read_new_schema\n AssociatedRecord.reset_column_information\n DeeplyAssociatedRecord.reset_column_information\n\n AssociatedRecord.cached_primary_index.send(:instance_variable_set, :@cache_key_prefix, nil)\n Item.cached_primary_index.send(:instance_variable_set, :@cache_key_prefix, nil)\n end", "def index_definition_sql(table_name, index)\n\t raise Error, \"Partial indexes are not supported for this database\" if index[:where]\n\n\t # Basic index creation DDL.\n\t sql = [\"CREATE\"]\n\t case index[:type]\n\t when :bitmap\n\t\t raise Error, \"Bitmap indexes cannot be unique\" if index[:unique]\n\t sql << 'BITMAP'\n\t when NilClass, :normal\n\t sql << 'UNIQUE' if index[:unique]\n\t else\n\t raise Error, \"Index type #{index[:type].inspect} is not supported for this database\"\n\t end\n\t index_name = index[:name] || default_index_name(table_name, index[:columns])\n\t qualified_table_name = quote_schema_table table_name\n\t sql << \"INDEX #{quote_identifier(index_name)} ON #{qualified_table_name}\"\n\t \n\t # Index columns and join indexes.\n index_join, index_columns = *index.values_at(:join,:columns)\n\t sql << literal(index_columns)\n if index_join\n\t\t raise Error, \"Join clauses are only supported for bitmap indexes\" if index[:type]!=:bitmap\n\t\t sql << \"FROM #{qualified_table_name},\"\n\t\t sql << index_columns.map{|k| quote_identifier schema_and_table(k).first }.uniq.join(', ')\n\t\t \n\t\t # TODO: Document this short-hand syntax: {:columns=>[:ref_table__ref_column], :join=>[:fk_column]}\n if Array===index_join and index_join.length==index_columns.length and index_join.all?{|k| Symbol===k}\n index_join = Hash[ index_join.map{|k| :\"#{table_name}__#{k}\" }.zip(index_columns) ]\n end\n\n\t sql << \"WHERE #{filter_expr(index_join)}\"\n\t end\n\t \n\t # Index attributes and options.\n\t sql << 'LOCAL' if index[:partitioned]\n\t sql << flag_option_sql(index, :parallel)\n\t sql << flag_option_sql(index, :logging)\n\t sql << \"TABLESPACE #{quote_identifier(index[:tablespace])}\" if index[:tablespace]\n\t sql << flag_option_sql(index, :visible, 'INVISIBLE')\n\t sql << compress_option_sql(index)\n\t sql << index[:options] if String === index[:options]\n\t sql << 'UNUSABLE' if FalseClass === index[:valid]\n\t sql.compact.join ' '\n\t end", "def index_bloat\n data = select(<<-SQL, \"Index Bloat\")\n SELECT tablename AS table_name\n , iname AS index_name\n , ituples::bigint AS rows\n , ipages::bigint AS pages\n , iotta AS otta\n , ROUND(CASE WHEN iotta = 0 OR ipages = 0 OR ipages = iotta THEN 0.0 ELSE ipages / iotta::numeric END, 1) AS percent_bloat\n , CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wasted_pages\n , CASE WHEN ipages < iotta THEN 0 ELSE (blocksize * (ipages - iotta))::bigint END AS wasted_size\n , CASE WHEN ipages < iotta THEN 0 ELSE blocksize * (ipages - iotta) END AS wasted_bytes\n\n FROM ( SELECT schemaname\n , tablename\n , cc.reltuples\n , cc.relpages\n , blocksize\n , CEIL((cc.reltuples * ((datahdr + pagesize - (CASE WHEN datahdr%pagesize = 0 THEN pagesize\n ELSE datahdr%pagesize END)) + nullhdr2 + 4)) / (blocksize - 20::float)\n ) AS otta\n , COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples, 0) AS ituples, 
COALESCE(c2.relpages, 0) AS ipages\n , COALESCE(CEIL((c2.reltuples * (datahdr - 12)) / (blocksize - 20::float)), 0) AS iotta\n FROM ( SELECT pagesize\n , blocksize\n , schemaname\n , tablename\n , (datawidth + (hdr + pagesize - (case when hdr%pagesize = 0 THEN pagesize ELSE hdr%pagesize END)))::numeric AS datahdr\n , (maxfracsum * (nullhdr + pagesize - (case when nullhdr%pagesize = 0 THEN pagesize ELSE nullhdr%pagesize END))) AS nullhdr2\n FROM ( SELECT schemaname\n , tablename\n , hdr\n , pagesize\n , blocksize\n , SUM((1 - null_frac) * avg_width) AS datawidth\n , MAX(null_frac) AS maxfracsum\n , hdr + ( SELECT 1 + count(*) / 8\n FROM pg_stats s2\n WHERE null_frac <> 0\n AND s2.schemaname = s.schemaname\n AND s2.tablename = s.tablename\n ) AS nullhdr\n FROM pg_stats s\n , ( SELECT\n (SELECT current_setting('block_size')::numeric) AS blocksize\n , CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#\"[0-9]+.[0-9]+#\"%' for '#')\n IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr\n , CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS pagesize\n FROM (SELECT version() AS v) AS foo\n ) AS constants\n GROUP BY 1, 2, 3, 4, 5\n ) AS foo\n ) AS rs\n JOIN pg_class cc\n ON cc.relname = rs.tablename\n JOIN pg_namespace nn\n ON cc.relnamespace = nn.oid\n AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema'\n LEFT JOIN pg_index i\n ON indrelid = cc.oid\n LEFT JOIN pg_class c2\n ON c2.oid = i.indexrelid\n ) AS sml\n WHERE schemaname = 'public'\n ORDER BY 1, 2\n SQL\n\n integer_columns = %w(\n otta\n pages\n pagesize\n rows\n wasted_bytes\n wasted_pages\n wasted_size\n )\n\n float_columns = %w(\n percent_bloat\n )\n\n data.each do |datum|\n integer_columns.each { |c| datum[c] = datum[c].to_i }\n float_columns.each { |c| datum[c] = datum[c].to_f }\n end\n\n data.to_a\n end", "def skip_schema_queries; end", "def rufus_table_bench (bench_title, db)\n\n 2.times { puts }\n puts bench_title\n\n Benchmark.benchmark(' ' * 31 + Benchmark::Tms::CAPTION, 31) do |b|\n\n db.clear\n\n db.clear\n db.set_index('name', :lexical)\n\n b.report('inserting data (index set)') do\n DATA1.each_with_index { |e, i| db[\"key #{i.to_s}\"] = e }\n end\n\n db.clear\n db.set_index('name', :remove)\n\n b.report('inserting data (no index)') do\n DATA1.each_with_index { |e, i| db[\"key #{i.to_s}\"] = e }\n end\n\n b.report('finding all keys') do\n db.keys\n end\n b.report('finding all keys (pref)') do\n db.keys(:prefix => 'key ')\n end\n b.report('finding all keys (r pref)') do\n db.keys.select { |k| k[0, 4] == 'key ' }\n end\n b.report('finding all') do\n db.query { |q| }\n end\n b.report('find last') do\n db[\"key #{DATA.size.to_s}\"]\n end\n b.report('delete last') do\n db.delete(\"key #{DATA.size.to_s}\")\n end\n b.report('find Alphonse') do\n db.query { |q| q.add('name', :equals, DATA1[0]['name']) }\n end\n\n b.report(\"setting index (#{DATA.size} rows)\") do\n db.set_index('name', :lexical)\n end\n\n b.report('find Alphonse (index set)') do\n db.query { |q| q.add('name', :equals, DATA1[0]['name']) }\n end\n\n b.report('delete_keys_with_prefix \"1\"') do\n db.delete_keys_with_prefix('key 1')\n end\n #b.report('del keys with prefix \"2\" (m)') do\n # ks = db.keys(:prefix => 'key 2')\n # ks.each { |k| db.delete(k) }\n #end\n end\n\n db.close\nend", "def _schema_ds\n @_schema_ds ||= begin\n ds = metadata_dataset.select{[\n pg_attribute[:attname].as(:name),\n SQL::Cast.new(pg_attribute[:atttypid], :integer).as(:oid),\n SQL::Cast.new(basetype[:oid], :integer).as(:base_oid),\n SQL::Function.new(:format_type, 
basetype[:oid], pg_type[:typtypmod]).as(:db_base_type),\n SQL::Function.new(:format_type, pg_type[:oid], pg_attribute[:atttypmod]).as(:db_type),\n SQL::Function.new(:pg_get_expr, pg_attrdef[:adbin], pg_class[:oid]).as(:default),\n SQL::BooleanExpression.new(:NOT, pg_attribute[:attnotnull]).as(:allow_null),\n SQL::Function.new(:COALESCE, SQL::BooleanExpression.from_value_pairs(pg_attribute[:attnum] => SQL::Function.new(:ANY, pg_index[:indkey])), false).as(:primary_key),\n Sequel[:pg_type][:typtype],\n (~Sequel[Sequel[:elementtype][:oid]=>nil]).as(:is_array),\n ]}.\n from(:pg_class).\n join(:pg_attribute, :attrelid=>:oid).\n join(:pg_type, :oid=>:atttypid).\n left_outer_join(Sequel[:pg_type].as(:basetype), :oid=>:typbasetype).\n left_outer_join(Sequel[:pg_type].as(:elementtype), :typarray=>Sequel[:pg_type][:oid]).\n left_outer_join(:pg_attrdef, :adrelid=>Sequel[:pg_class][:oid], :adnum=>Sequel[:pg_attribute][:attnum]).\n left_outer_join(:pg_index, :indrelid=>Sequel[:pg_class][:oid], :indisprimary=>true).\n where{{pg_attribute[:attisdropped]=>false}}.\n where{pg_attribute[:attnum] > 0}.\n order{pg_attribute[:attnum]}\n\n # :nocov:\n if server_version > 100000\n # :nocov:\n ds = ds.select_append{pg_attribute[:attidentity]}\n\n # :nocov:\n if server_version > 120000\n # :nocov:\n ds = ds.select_append{Sequel.~(pg_attribute[:attgenerated]=>'').as(:generated)}\n end\n end\n\n ds\n end\n end", "def recalculate_usage\n # For some reason, ANALYZE TABLE doesn't update statistics in Travis' environment\n ActiveRecord::Base.connection.execute(\"OPTIMIZE TABLE #{binding.database}.stuff\")\n end", "def flushdb; end", "def write_sql model_name, model_attributes,output\n model_attributes.each do|key,query|\n sql= ActiveRecord::Base.connection();\n (sql.select_all query).each do |row|\n make_triples(row,model_name,\"\")\n end\n end\n end", "def generate_active_record(mdm_model, config)\n #do the code to create new classes based on model metadata\n #and load them up in the Ruby VM\n #below NOTE! 
Need table created first for AR\n #AR provides a #column_names method that returns an array of column names\n useconnection = nil\n mdm_model.mdm_objects.each do |mdm_object|\n klass = Class.new ActiveRecord::Base do\n #establish_connection(config)\n #AR to set the physical tablename\n before_save :diff_row\n self.table_name = mdm_object.name\n \n #below does composite keys!\n \n if mdm_object.mdm_primary_keys.size > 0\n pkeys = mdm_object.mdm_primary_keys.collect{|x| x.mdm_column.name.to_sym }\n self.primary_keys = pkeys\n @@pklist = pkeys\n puts \"-\" * 80\n puts mdm_object.name, pkeys.size\n end\n #note this is FK implementation\n # has_many :statuses, :class_name => 'MembershipStatus', :foreign_key => [:user_id, :group_id]\n\n def name\n \n end\n \n def diff_row\n #here we send changes out over to the queue\n #we need PK followed by row\n puts self.changes\n pkvals = {}\n changevals = {}\n self.class.primary_keys.each do |k|\n pkvals[k] = self.read_attribute(k)\n end\n changevals['colvals'] = self.changes\n changevals['pkeys'] = pkvals\n redis = Redis.new\n redis.publish(\"mdm:freemdm\", changevals.to_json)\n end\n end\n \n \n #NOTE will need some adjustments to fit legacy tables to AR\n Object.const_set mdm_object.name.capitalize, klass\n puts config.symbolize_keys\n klass.establish_connection(config.symbolize_keys) \n useconnection = klass.connection if !useconnection\n # eval(\"class #{klass.name}; attr_accessible *columns;end\")\n #\n generate_column_meta(klass)\n\n klass.connection.jdbc_connection.close\n end\n \n end", "def reset_fast_pk_lookup_sql\n @fast_pk_lookup_sql = if @simple_table && @simple_pk\n \"SELECT * FROM #@simple_table WHERE #@simple_pk = \".freeze\n end\n @fast_instance_delete_sql = if @simple_table && @simple_pk\n \"DELETE FROM #@simple_table WHERE #@simple_pk = \".freeze\n end\n end", "def rebuild_pgindex!\n self.all.each { |model| model.rebuild_pgindex! 
}\n end", "def _indexes_ds\n @_indexes_ds ||= begin\n if server_version >= 90500\n order = [Sequel[:indc][:relname], Sequel.function(:array_position, Sequel[:ind][:indkey], Sequel[:att][:attnum])]\n # :nocov:\n else\n range = 0...32\n order = [Sequel[:indc][:relname], SQL::CaseExpression.new(range.map{|x| [SQL::Subscript.new(Sequel[:ind][:indkey], [x]), x]}, 32, Sequel[:att][:attnum])]\n # :nocov:\n end\n\n attnums = SQL::Function.new(:ANY, Sequel[:ind][:indkey])\n\n ds = metadata_dataset.\n from{pg_class.as(:tab)}.\n join(Sequel[:pg_index].as(:ind), :indrelid=>:oid).\n join(Sequel[:pg_class].as(:indc), :oid=>:indexrelid).\n join(Sequel[:pg_attribute].as(:att), :attrelid=>Sequel[:tab][:oid], :attnum=>attnums).\n left_join(Sequel[:pg_constraint].as(:con), :conname=>Sequel[:indc][:relname]).\n where{{\n indc[:relkind]=>%w'i I',\n ind[:indisprimary]=>false,\n :indexprs=>nil,\n :indisvalid=>true}}.\n order(*order).\n select{[indc[:relname].as(:name), ind[:indisunique].as(:unique), att[:attname].as(:column), con[:condeferrable].as(:deferrable)]}\n\n # :nocov:\n ds = ds.where(:indisready=>true) if server_version >= 80300\n ds = ds.where(:indislive=>true) if server_version >= 90300\n # :nocov:\n\n ds\n end\n end", "def db; end", "def db; end", "def inner_dump( &encode_block )\n # could possibly overrride Dataset#paginate(page_no, page_size, record_count=nil)\n on_conditions = primary_keys.map{|f| [f,f]}.to_h\n (0..table_dataset.count).step(page_size).each do |offset|\n limit_dataset = table_dataset.select( *primary_keys ).limit( page_size, offset ).order( *primary_keys )\n page = table_dataset.join( limit_dataset, on_conditions ).order( *primary_keys ).qualify(table_name)\n logger.info \"#{__method__} #{table_name} #{offset}\"\n logger.debug page.sql\n page.each &encode_block\n end\n end", "def db_column_func(&block)\n new_structure = self.class.db_structure\n\n yield(new_structure)\n self.class.update_db(new_structure)\n new_structure\n end", "def prepare\n super\n\n # open the database.\n if File.exist?(@options['DATABASE'])\n @db = ::SQLite3::Database.open(@options['DATABASE'])\n else\n @db = ::SQLite3::Database.new(@options['DATABASE'])\n end\n @db.results_as_hash = true\n\n tables = @db.execute('SELECT name FROM sqlite_master WHERE type=\"table\" AND name=\"tblMessages\" ORDER BY name')\n if tables.length == 0\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblMessages (\n mID integer primary key,\n mStatus char(5),\n mTime datetime,\n mText varchar(255))\n )\n\n @db.execute( tbl_def )\n end\n\n tables = @db.execute('SELECT name FROM sqlite_master WHERE type=\"table\" AND name=\"tblOptions\" ORDER BY name')\n if tables.length == 0\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblOptions (\n oID integer primary key,\n oType varchar(255),\n oInstance varchar(255),\n oOption varchar(255),\n oValue TEXT)\n )\n\n @db.execute( tbl_def )\n end\n\n tables = @db.execute('SELECT name FROM sqlite_master WHERE type=\"table\" AND name=\"tblProcedures\" ORDER BY name')\n if tables.length == 0\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblProcedures (\n pID integer primary key,\n pTime datetime,\n pProc varchar(255))\n )\n\n @db.execute( tbl_def )\n\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblSteps (\n sID integer primary key,\n pID integer,\n sLabel varchar(255),\n sCommand TEXT,\n sFinal TEXT)\n )\n\n @db.execute( tbl_def )\n\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblParameters (\n pID integer primary key,\n sID integer,\n pLabel varchar(255),\n pValue varchar(255),\n pConstraint varchar(255))\n )\n\n 
@db.execute( tbl_def )\n\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblComparisons (\n cID integer primary key,\n pID integer,\n sID1 integer,\n sID2 integer,\n cRelationship varchar(255))\n )\n\n @db.execute( tbl_def )\n end\n\n tables = @db.execute('SELECT name FROM sqlite_master WHERE type=\"table\" AND name=\"tblResults\" ORDER BY name')\n if tables.length == 0\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblResults (\n rID integer primary key,\n rTime datetime,\n rTestId integer,\n rProc varchar(255),\n rProcId integer,\n rApp varchar(255))\n )\n\n @db.execute( tbl_def )\n\n tbl_def = %q( CREATE TABLE IF NOT EXISTS tblResultData (\n dID integer primary key,\n rID integer,\n dStep varchar(255),\n dStepId integer,\n dStatus varchar(255),\n dType varchar(255),\n dData TEXT)\n )\n\n @db.execute( tbl_def )\n end\n\n tables = @db.execute('SELECT name FROM sqlite_master WHERE type=\"view\" AND name=\"qryResults\" ORDER BY name')\n if tables.length == 0\n tbl_def = %q( CREATE VIEW IF NOT EXISTS qryResults AS\n SELECT R.rTestId As qTestId, R.rProc As qProc, R.rProcId As qProcId, R.rApp As qApp,\n D.dStep As qStep, D.dStepId As qStepId, D.dStatus As qStatus, D.dType As qType, D.dData As qData\n FROM tblResults As R, tblResultData As D\n WHERE D.rID = R.rID\n )\n\n @db.execute( tbl_def )\n\n tbl_def = %q( CREATE VIEW IF NOT EXISTS qryComparisons AS\n SELECT R.rTestId As qTestId, R.rProc As qProc, R.rProcId As qProcId, C.cRelationship As qRelationship,\n D1.dStep As qStep1, D1.dStepId As qStepId1, D1.dStatus As qStatus1, D1.dType As qType1, D1.dData As qData1,\n D2.dStep As qStep2, D2.dStepId As qStepId2, D2.dStatus As qStatus2, D2.dType As qType2, D2.dData As qData2\n FROM tblComparisons As C, tblResults As R, tblResultData As D1, tblResultData As D2\n WHERE C.pID = R.rProcID AND C.sID1 = D1.dStepId AND C.sID2 = D2.dStepId\n )\n\n @db.execute( tbl_def )\n end\n end", "def __foreign_key_list_ds(reverse)\n if reverse\n ctable = Sequel[:att2]\n cclass = Sequel[:cl2]\n rtable = Sequel[:att]\n rclass = Sequel[:cl]\n else\n ctable = Sequel[:att]\n cclass = Sequel[:cl]\n rtable = Sequel[:att2]\n rclass = Sequel[:cl2]\n end\n\n if server_version >= 90500\n cpos = Sequel.expr{array_position(co[:conkey], ctable[:attnum])}\n rpos = Sequel.expr{array_position(co[:confkey], rtable[:attnum])}\n # :nocov:\n else\n range = 0...32\n cpos = Sequel.expr{SQL::CaseExpression.new(range.map{|x| [SQL::Subscript.new(co[:conkey], [x]), x]}, 32, ctable[:attnum])}\n rpos = Sequel.expr{SQL::CaseExpression.new(range.map{|x| [SQL::Subscript.new(co[:confkey], [x]), x]}, 32, rtable[:attnum])}\n # :nocov:\n end\n\n ds = metadata_dataset.\n from{pg_constraint.as(:co)}.\n join(Sequel[:pg_class].as(cclass), :oid=>:conrelid).\n join(Sequel[:pg_attribute].as(ctable), :attrelid=>:oid, :attnum=>SQL::Function.new(:ANY, Sequel[:co][:conkey])).\n join(Sequel[:pg_class].as(rclass), :oid=>Sequel[:co][:confrelid]).\n join(Sequel[:pg_attribute].as(rtable), :attrelid=>:oid, :attnum=>SQL::Function.new(:ANY, Sequel[:co][:confkey])).\n join(Sequel[:pg_namespace].as(:nsp), :oid=>Sequel[:cl2][:relnamespace]).\n order{[co[:conname], cpos]}.\n where{{\n cl[:relkind]=>%w'r p',\n co[:contype]=>'f',\n cpos=>rpos\n }}.\n select{[\n co[:conname].as(:name),\n ctable[:attname].as(:column),\n co[:confupdtype].as(:on_update),\n co[:confdeltype].as(:on_delete),\n cl2[:relname].as(:table),\n rtable[:attname].as(:refcolumn),\n SQL::BooleanExpression.new(:AND, co[:condeferrable], co[:condeferred]).as(:deferrable),\n nsp[:nspname].as(:schema)\n ]}\n\n if reverse\n 
ds = ds.order_append(Sequel[:nsp][:nspname], Sequel[:cl2][:relname])\n end\n\n ds\n end", "def rebuild_model options = {}\n ActiveRecord::Base.connection.create_table :dummies, :force => true do |table|\n table.string :in_the_clear\n table.binary :secret\n table.binary :secret_key\n table.binary :secret_iv\n table.binary :segreto\n end\n rebuild_class options\nend", "def reset_fast_pk_lookup_sql\n @fast_pk_lookup_sql = if @simple_table && @simple_pk\n \"SELECT * FROM #{@simple_table} WHERE #{@simple_pk} = \".freeze\n end\n @fast_instance_delete_sql = if @simple_table && @simple_pk\n \"DELETE FROM #{@simple_table} WHERE #{@simple_pk} = \".freeze\n end\n end", "def rebuild_table\n @database.execute(\"SELECT ZICCLOUDSYNCINGOBJECT.ZMERGEABLEDATA1 \" +\n \"FROM ZICCLOUDSYNCINGOBJECT \" +\n \"WHERE ZICCLOUDSYNCINGOBJECT.ZIDENTIFIER=?\",\n @uuid) do |row|\n\n # Extract the blob\n gzipped_data = row[\"ZMERGEABLEDATA1\"]\n zlib_inflater = Zlib::Inflate.new(Zlib::MAX_WBITS + 16)\n gunzipped_data = zlib_inflater.inflate(gzipped_data)\n\n # Read the protobuff\n mergabledata_proto = MergableDataProto.decode(gunzipped_data)\n\n # Build list of key items\n @key_items = Array.new\n mergabledata_proto.mergable_data_object.mergeable_data_object_data.mergeable_data_object_key_item.each do |key_item|\n @key_items.push(key_item)\n end\n\n # Build list of type items\n @type_items = Array.new\n mergabledata_proto.mergable_data_object.mergeable_data_object_data.mergeable_data_object_type_item.each do |type_item|\n @type_items.push(type_item)\n end\n\n # Build list of uuid items\n @uuid_items = Array.new\n mergabledata_proto.mergable_data_object.mergeable_data_object_data.mergeable_data_object_uuid_item.each do |uuid_item|\n @uuid_items.push(uuid_item)\n end\n\n # Build Array of objects\n @table_objects = Array.new\n mergabledata_proto.mergable_data_object.mergeable_data_object_data.mergeable_data_object_entry.each do |mergeable_data_object_entry|\n @table_objects.push(mergeable_data_object_entry)\n\n # Best way I've found to set the table direction\n if mergeable_data_object_entry.custom_map\n if mergeable_data_object_entry.custom_map.map_entry.first.key == @key_items.index(\"crTableColumnDirection\") + 1 #Oddly seems to correspond to 'self'\n @table_direction = mergeable_data_object_entry.custom_map.map_entry.first.value.string_value\n end\n end\n end\n\n # Find the first ICTable, which shuld be the root, and execute\n mergabledata_proto.mergable_data_object.mergeable_data_object_data.mergeable_data_object_entry.each do |mergeable_data_object_entry|\n if mergeable_data_object_entry.custom_map and @type_items[mergeable_data_object_entry.custom_map.type] == \"com.apple.notes.ICTable\"\n parse_table(mergeable_data_object_entry)\n end\n end\n end\n end", "def table; end", "def table; end", "def table; end", "def table; end", "def table_schema(tbl)\n column_sql = <<-eosql\nSELECT rf.rdb$field_name AS \"name\",\n field.rdb$field_type AS \"type_code\",\n field.rdb$field_sub_type AS \"subtype_code\",\n-- -- -- field.rdb$field_length AS \"length\", -- -- --\n field.rdb$field_precision AS \"precision\",\n field.rdb$field_scale AS \"scale\",\n CASE\n WHEN rf.rdb$null_flag > 0\n THEN 'NO'\n ELSE 'YES'\n END AS \"nullable\",\n CASE\n WHEN iseg.rdb$index_name IS NOT NULL\n THEN 'YES'\n ELSE 'NO'\n END AS \"primary_key\"\nFROM rdb$relation_fields rf\nJOIN rdb$fields field ON rf.rdb$field_source = field.rdb$field_name\nLEFT JOIN rdb$relation_constraints c\n ON c.rdb$relation_name = rf.rdb$relation_name\n AND\n 
c.rdb$constraint_type = 'PRIMARY KEY'\nLEFT JOIN rdb$index_segments iseg\n ON iseg.rdb$index_name = c.rdb$index_name\n AND\n iseg.rdb$field_name = rf.rdb$field_name\nWHERE rf.rdb$relation_name = ?\nORDER BY rf.rdb$field_position, rf.rdb$field_name\neosql\n\n info = RDBI::Schema.new([], [])\n res = execute(column_sql, tbl.to_s.upcase)\n res.as(:Struct)\n while row = res.fetch[0]\n type = RDBI::Driver::Rubyfb::Types::field_type_to_rubyfb(row[:type_code], row[:subtype_code])\n info.columns << RDBI::Column.new(\n row[:name].to_sym,\n type,\n RDBI::Driver::Rubyfb::Types::rubyfb_to_rdbi(type, row[:scale]),\n row[:precision],\n row[:scale],\n row[:nullable] == 'YES',\n #nil, # metadata\n #nil, # default\n #nil, # table\n )\n (info.columns[-1].primary_key = row[:primary_key] == 'YES') rescue nil # pk > rdbi 0.9.1\n end\n return unless info.columns.length > 0\n info.tables << tbl\n info\n end", "def recalculate_usage(binding)\n # For some reason, ANALYZE TABLE doesn't update statistics in Travis' environment\n ActiveRecord::Base.connection.execute(\"OPTIMIZE TABLE #{binding.database_name}.stuff\")\n ActiveRecord::Base.connection.execute(\"OPTIMIZE TABLE #{binding.database_name}.stuff2\")\n end", "def before_load\n data_class.rebuild_table\n super\n end", "def generate_from_collection(model, items, parent_key, parent_id, poly_in)\n model_table = model.to_s.gsub(/::/, \"_\").tableize\n insert_string = \"INSERT INTO \" << model_table << \"(\"\n\n # map mongo types to sql\n sql_types = {\"String\"=>\"text\", \"BSON::ObjectId\"=>\"primary_key\", \"Time\"=>\"time\", \"Object\"=>\"integer\", \"Array\"=>\"text\", \"Integer\"=>\"integer\", \"string\"=>\"text\", \"DateTime\"=>\"datetime\", \"Date\"=>\"date\", \"Hash\"=>\"text\", \"Boolean\"=>\"boolean\", \"Float\"=>\"float\"}\n ignored_fields = ['_type', \"_keywords\"]\n\n # maintain list of fields you either want renamed or if you want them skipped by all transforms include them here\n renamed_fields = {\"_id\" => \"mongo_id\"}\n\n # hash of habtm relations from your schema - make sure the key is alpha before the value\n habtm = {'AlphaTable' => 'BetaTable'}\n\n # user mongoid to figure out relationships we need to iterate over\n relations_in = model.relations.select {|key,value| value[:relation]==Mongoid::Relations::Embedded::Many}\n single_in = model.relations.select {|key,value| value[:relation]==Mongoid::Relations::Embedded::One}\n items.each_with_index do |obj, i|\n id_to_use_next = @@id_counter[model_table] || 100000\n @@id_counter[model_table] = id_to_use_next+1 # hash to keep track of postgres id sequences\n obj_hash = {}\n obj_id = \"\"\n postgres_obj_id = \"\"\n model.fields.each do |field|\n next if ignored_fields.include?(field.first) or field.first.end_with?(\"_ids\")\n field_name = field.first\n\n val = obj.send(field.first.to_sym)\n val = val.to_s if val.is_a?(BSON::ObjectId)\n # serialize hashes & arrays\n if val.is_a?(BSON::OrderedHash)\n val = val.to_h.to_yaml\n elsif val.is_a?(Array)\n val = val.map {|v| v.is_a?(BSON::OrderedHash) ? v.to_h : v }\n val = val.to_yaml\n elsif val.is_a?(Hash)\n val = val.to_yaml\n end\n\n # clean up any apostrophes\n val = val.gsub(/'/, \"''\") unless val.nil? || !val.is_a?(String)\n val = val.to_time.utc if val.present? && ([DateTime, Date, Time].include?(field[1].options[:type]))\n val = '' if val.nil? 
&& (field[1].options[:type] == Time || field[1].options[:type] == DateTime || field[1].options[:type] == Date)\n\n if renamed_fields.include?(field_name)\n field_name = renamed_fields[field_name]\n elsif field_name==\"number\" # we were using a number field for user facing 'id'\n field_name = \"id\"\n elsif field_name.end_with?(\"_id\")\n if val.blank?\n obj_hash[field_name] = \"\"\n else\n # throw in placeholder values with the bson id\n obj_hash[field_name] = \"#{val}_placeholder\"\n field_name = \"mongo_#{field_name.to_s.gsub(/::/, \"_\").downcase}\"\n @@id_indexed[\"#{val}_placeholder\"] << @@lines.count\n @@all_objects[\"#{val}_placeholder\"] = field_name\n end\n end\n obj_id = val if field_name==\"mongo_id\"\n postgres_obj_id = val if field_name==\"id\"\n obj_hash[field_name] = val\n end\n if poly_in # do polymorphic foreign keys\n obj_hash[\"mongo_#{poly_in}_id\"] = parent_id\n obj_hash[\"#{poly_in}_type\"] = parent_key.classify\n obj_hash[\"#{poly_in}_id\"] = \"#{parent_id}_placeholder\"\n @@all_objects[\"#{parent_id}_placeholder\"] = parent_key.classify\n @@id_indexed[\"#{parent_id}_placeholder\"] << @@lines.count\n elsif !parent_key.nil?\n obj_hash[\"mongo_#{parent_key.to_sym}_id\"] = parent_id\n obj_hash[\"#{parent_key.to_sym}_id\"] = \"#{parent_id}_placeholder\"\n @@all_objects[\"#{parent_id}_placeholder\"] = parent_key.classify\n @@id_indexed[\"#{parent_id}_placeholder\"] << @@lines.count\n end\n if postgres_obj_id.blank?\n postgres_obj_id = id_to_use_next\n obj_hash[\"id\"] = postgres_obj_id\n end\n @@id_hash[(\"#{obj_id}_placeholder\")] = postgres_obj_id.to_s\n\n # create sql from array of hashes\n insert_sql(insert_string, obj_hash)\n\n # HABTM\n target=habtm[model.to_s]\n if target\n habtm_collection = obj.send(\"#{target.downcase}_ids\")\n if habtm.size>0\n habtm_obj_hash = Hash.new\n habtm_collection.each do |target_obj|\n habtm_obj_hash[\"mongo_#{target.downcase}_id\"] = target_obj.to_s\n habtm_obj_hash[\"#{target.downcase}_id\"] = \"#{target_obj.to_s}_placeholder\"\n habtm_obj_hash[\"mongo_#{model.to_s.downcase}_id\"] = obj._id\n habtm_obj_hash[\"#{model.to_s.downcase}_id\"] = postgres_obj_id\n @@id_indexed[\"#{target_obj.to_s}_placeholder\"] << @@lines.count\n insert_sql(\"INSERT INTO #{model_table}_#{target.to_s.gsub(/::/, \"_\").tableize} (\", habtm_obj_hash)\n end\n end\n end\n single_in.each do |key, embedded|\n poly_as = embedded[:as] unless embedded[:as].blank?\n embed_collection = obj.send(key.to_sym)\n next if embed_collection.blank?\n generate_from_collection(embed_collection.class, [embed_collection], embedded[:inverse_class_name].to_s.gsub(/::/, \"_\").downcase, obj_id, poly_as) << \"\\n\\r\" unless embed_collection.blank?\n end\n relations_in.each do |key, embedded|\n poly_as = embedded[:as] unless embedded[:as].blank?\n embed_collection = obj.send(key.to_sym)\n generate_from_collection(embed_collection.first.class, embed_collection, embedded[:inverse_class_name].to_s.gsub(/::/, \"_\").downcase, obj_id, poly_as) << \"\\n\\r\" unless embed_collection.blank?\n end\n end\n end", "def indexes(table_name, name = nil)\n result = query(<<-SQL, 'SCHEMA')\n SELECT distinct i.relname, d.indisunique, d.indkey, pg_get_indexdef(d.indexrelid), t.oid\n FROM pg_class t\n INNER JOIN pg_index d ON t.oid = d.indrelid\n INNER JOIN pg_class i ON d.indexrelid = i.oid\n WHERE i.relkind = 'i'\n AND d.indisprimary = 'f'\n AND t.relname = '#{table_name}'\n AND i.relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname = ANY (current_schemas(false)) )\n ORDER BY i.relname\n 
SQL\n\n result.map do |row|\n index_name = row[0]\n unique = row[1] == 't'\n indkey = row[2].split(\" \")\n inddef = row[3]\n oid = row[4]\n\n columns = query(<<-SQL, \"SCHEMA\")\n SELECT a.attnum, a.attname, t.typname\n FROM pg_attribute a, pg_type t\n WHERE a.attrelid = #{oid}\n AND a.attnum IN (#{indkey.join(\",\")})\n AND a.atttypid = t.oid\n SQL\n columns = columns.inject({}){ |h, r| h[r[0].to_s] = [r[1], r[2]]; h }\n column_names = columns.values_at(*indkey).compact.map{ |a| a[0] }\n\n unless column_names.empty?\n # add info on sort order for columns (only desc order is explicitly specified, asc is the default)\n desc_order_columns = inddef.scan(/(\\w+) DESC/).flatten\n orders = desc_order_columns.any? ? Hash[desc_order_columns.map {|order_column| [order_column, :desc]}] : {}\n where = inddef.scan(/WHERE (.+)$/).flatten[0]\n # using = inddef.scan(/USING (.+?) /).flatten[0].to_sym\n\n spatial = inddef =~ /using\\s+gist/i &&\n columns.size == 1 &&\n %w[geometry geography].include?(columns.values.first[1])\n\n # IndexDefinition.new(table_name, index_name, unique, column_names, [], orders, where, nil, using)\n ::RGeo::ActiveRecord::SpatialIndexDefinition.new(table_name, index_name, unique, column_names, [], orders, where, !!spatial)\n end\n end.compact\n end", "def extract_db_content(version)\n # Only compare tables with meaningful data. Tables\n # containing temporary state, e.g., db_mutex, are not relevant here.\n # TODO: re-add other tables to check.\n include_tables = %w[master_accounts accounts positions partners partner_users]\n\n client = startup\n\n # db_columns is a temporary structure - it holds all columns for db, by table.\n # It will be merged with rows into a single data structure to facilitate\n # comparison between the 2 DBs.\n db_columns = build_columns_hash(client)\n\n # build the structure which will be used for table comparisons.\n db_columns_and_rows = build_columns_and_rows_hash(db_columns, client, include_tables)\n\n store = initialize_store('mg_db.' 
+ version)\n persist_data(db_columns_and_rows, store)\nend", "def pack\r\n raise \"Do not execute this method in client/server mode!\" if \\\r\n @db.client?\r\n\r\n lines_deleted = @db.engine.pack_table(self)\r\n\r\n update_header_vars\r\n\r\n @db.engine.remove_recno_index(@name)\r\n @db.engine.remove_indexes(@name)\r\n create_indexes\r\n create_table_class unless @db.server?\r\n\r\n return lines_deleted\r\n end", "def _table; @table end", "def finalize(opts = {})\n # Can we use opts[:from] instead of first_source_table and override?\n model_table_name = opts[:model_table_name] || model.raw_dataset.first_source_table\n sel_col = model.columns.map { |c| Sequel.qualify(model_table_name, c) }\n return select(*sel_col) if opts[:no_finalize]\n extra_columns = [opts[:extra_columns]].flatten.compact\n extra_columns_src = extra_columns.map { |c| c.try(:expression) || c }\n\n ds = select(*sel_col,\n Sequel.function(:rank)\n .over(:partition => @opts[:partition_columns] ||\n (extra_columns_src +\n [last_record_id,\n last_branch_path].compact),\n :order => @opts[:order_columns] ||\n Sequel.qualify(model_table_name,\n :version).desc))\n if last_branch_path_context\n ds = ds.select_append(last_branch_path_context.as(:branch_path_context))\n end\n\n ds = ds.select_append(*extra_columns) unless extra_columns.empty?\n\n if opts[:extra_deleted_column]\n ds = ds.select_append(opts[:extra_deleted_column].as(:extra_deleted))\n end\n\n return ds if opts[:include_all]\n\n ds = ds.from_self.where(:rank => 1)\n unless opts[:include_deleted]\n ds = ds.where(:deleted => false)\n ds = ds.where(:extra_deleted => false) if opts[:extra_deleted_column]\n end\n ds = ds.select(*model.columns)\n ds = ds.select_append(:branch_path_context) if last_branch_path_context\n if opts[:extra_columns]\n ds = ds.select_append(*extra_columns.map { |c| c.try(:aliaz) || c.try(:column) || c })\n end\n ds\n end", "def process_queries(queries)\n\n # reset hypothetical indexes\n reset_hypothetical_indexes\n\n tables = Set.new(database_tables + materialized_views)\n\n # map tables without schema to schema\n no_schema_tables = {}\n search_path_index = Hash[search_path.map.with_index.to_a]\n tables.group_by {|t| t.split(\".\")[-1]}.each do |group, t2|\n no_schema_tables[group] = t2.sort_by {|t| [search_path_index[t.split(\".\")[0]] || 1000000, t]}[0]\n end\n\n # add tables from views\n view_tables = database_view_tables\n view_tables.each do |v, vt|\n view_tables[v] = vt.map {|t| no_schema_tables[t] || t}\n end\n\n # fully resolve tables\n # make sure no views in result\n view_tables.each do |v, vt|\n view_tables[v] = vt.flat_map {|t| view_tables[t] || [t]}.uniq\n end\n\n # filter queries from other databases and system tables\n queries.each do |query|\n\n # add schema to table if needed\n query.tables = query.tables.map {|t| no_schema_tables[t] || t}\n\n # substitute view tables\n new_tables = query.tables.flat_map {|t| view_tables[t] || [t]}.uniq\n query.tables_from_views = new_tables - query.tables\n query.tables = new_tables\n\n # check for missing tables\n query.missing_tables = !query.tables.all? 
{|t| tables.include?(t)}\n end\n\n # set tables\n tables = Set.new(queries.reject(&:missing_tables).flat_map(&:tables))\n\n # must come after missing tables set\n if @include_tables\n include_set = Set.new(@include_tables)\n tables.keep_if {|t| include_set.include?(t) || include_set.include?(t.split(\".\")[-1])}\n end\n\n if @exclude_tables.any?\n exclude_set = Set.new(@exclude_tables)\n tables.delete_if {|t| exclude_set.include?(t) || exclude_set.include?(t.split(\".\")[-1])}\n end\n\n # remove system tables\n tables.delete_if {|t| t.start_with?(\"information_schema.\") || t.start_with?(\"pg_catalog.\")}\n\n queries.each do |query|\n query.candidate_tables = !query.missing_tables && query.tables.any? {|t| tables.include?(t)}\n end\n\n # analyze tables if needed\n analyze_tables(tables) if tables.any? && (@analyze || @log_level == \"debug2\")\n\n # create hypothetical indexes and explain queries\n candidates = tables.any? ? create_hypothetical_indexes(queries.select(&:candidate_tables), tables) : {}\n\n # see if new indexes were used and meet bar\n new_indexes = determine_indexes(queries, candidates, tables)\n\n # display and create new indexes\n show_and_create_indexes(new_indexes, queries, tables)\n end", "def table_bloat\n data = select(<<-SQL, \"Table Bloat\")\n SELECT tablename AS table_name\n , reltuples::bigint AS rows\n , relpages::bigint AS pages\n , otta\n , ROUND(CASE WHEN otta = 0 OR sml.relpages = 0 OR sml.relpages = otta THEN 0.0\n ELSE sml.relpages / otta::numeric END, 1) AS percent_bloat\n , CASE WHEN relpages < otta THEN 0\n ELSE relpages::bigint - otta END AS wasted_pages\n , CASE WHEN relpages < otta THEN 0\n ELSE (blocksize * (relpages - otta))::bigint END AS wasted_size\n , CASE WHEN relpages < otta THEN 0\n ELSE blocksize * (sml.relpages - otta)::bigint END AS wasted_bytes\n FROM ( SELECT schemaname\n , tablename\n , cc.reltuples\n , cc.relpages\n , blocksize\n , CEIL((cc.reltuples * ((datahdr + pagesize - (CASE WHEN datahdr%pagesize = 0 THEN pagesize\n ELSE datahdr%pagesize END)) + nullhdr2 + 4)) / (blocksize - 20::float)\n ) AS otta\n FROM ( SELECT pagesize\n , blocksize\n , schemaname\n , tablename\n , (datawidth + (hdr + pagesize - (CASE WHEN hdr%pagesize = 0 THEN pagesize\n ELSE hdr%pagesize END)))::numeric\n AS datahdr\n , (maxfracsum * (nullhdr + pagesize - (CASE WHEN nullhdr%pagesize = 0 THEN pagesize\n ELSE nullhdr%pagesize END)))\n AS nullhdr2\n FROM ( SELECT schemaname\n , tablename\n , hdr\n , pagesize\n , blocksize\n , SUM((1 - null_frac) * avg_width) AS datawidth\n , MAX(null_frac) AS maxfracsum\n , hdr + ( SELECT 1 + count(*) / 8\n FROM pg_stats s2\n WHERE null_frac <> 0\n AND s2.schemaname = s.schemaname\n AND s2.tablename = s.tablename\n ) AS nullhdr\n FROM pg_stats s\n , ( SELECT\n ( SELECT current_setting('block_size')::numeric) AS blocksize\n , CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2)\n FROM '#\"[0-9]+.[0-9]+#\"%' for '#')\n IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr\n , CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8\n ELSE 4 END AS pagesize\n FROM ( SELECT version() AS v) AS foo\n ) AS constants\n GROUP BY 1, 2, 3, 4, 5\n ) AS foo\n ) AS rs\n JOIN pg_class cc\n ON cc.relname = rs.tablename\n JOIN pg_namespace nn\n ON cc.relnamespace = nn.oid\n AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema'\n ) AS sml\n WHERE schemaname = 'public'\n ORDER BY 1\n SQL\n\n integer_columns = %w(\n otta\n pages\n pagesize\n rows\n wasted_bytes\n wasted_pages\n wasted_size\n )\n\n float_columns = %w(\n percent_bloat\n )\n\n data.each do 
|datum|\n integer_columns.each { |c| datum[c] = datum[c].to_i }\n float_columns.each { |c| datum[c] = datum[c].to_f }\n end\n\n data.to_a\n end", "def update_database\n word_ids.size.times do |i|\n hidden_ids.size.times do |j|\n set_strength(word_ids[i], hidden_ids[j], 0, weights_in[i][j])\n end\n end\n\n hidden_ids.size.times do |j|\n url_ids.size.times do |k|\n set_strength(hidden_ids[j], url_ids[k], 1, weights_out[j][k])\n end\n end\n\n commit\n end", "def columns(table)\n dbh = DBI::DatabaseHandle.new(self)\n uniques = []\n dbh.execute(\"SHOW INDEX FROM #{table}\") do |sth|\n sth.each do |row|\n uniques << row[4] if row[1] == \"0\"\n end\n end \n\n ret = nil\n dbh.execute(\"SHOW FIELDS FROM #{table}\") do |sth|\n ret = sth.collect do |row|\n name, type, nullable, key, default, extra = row\n #type = row[1]\n #size = type[type.index('(')+1..type.index(')')-1]\n #size = 0\n #type = type[0..type.index('(')-1]\n\n sqltype, type, size, decimal = mysql_type_info(row[1])\n col = Hash.new\n col['name'] = name\n col['sql_type'] = sqltype\n col['type_name'] = type\n col['nullable'] = nullable == \"YES\"\n col['indexed'] = key != \"\"\n col['primary'] = key == \"PRI\"\n col['unique'] = uniques.index(name) != nil\n col['precision'] = size\n col['scale'] = decimal\n col['default'] = row[4]\n col\n end # collect\n end # execute\n \n ret\n end", "def children_table; end", "def recalculate_usage(binding)\n # For some reason, ANALYZE TABLE doesn't update statistics in Travis' environment\n ActiveRecord::Base.connection.execute(\"OPTIMIZE TABLE #{binding.database_name}.stuff\")\n ActiveRecord::Base.connection.execute(\"OPTIMIZE TABLE #{binding.database_name}.stuff2\")\n end", "def records_for_rebuild(batch_size = 1000)\n transaction do\n if use_fast_batches?\n offset = 0\n while (rows = find :all, :conditions => [ \"#{table_name}.id > ?\", offset ], :limit => batch_size).any?\n offset = rows.last.id\n yield rows, offset\n end\n else\n order = \"#{primary_key} ASC\" # fixes #212\n 0.step(self.count, batch_size) do |offset|\n yield find( :all, :limit => batch_size, :offset => offset, :order => order ), offset\n end\n end\n end\n end", "def snapshots_redact_sql_queries; end", "def build_indexes\n Schema::Physical::Indexes.new\n end", "def extract_dbc_data\n tabledata = {}\n\n curt = nil\n @db.each do |r|\n unless r.nil?\n if r.objecttype == \"Table\"\n # This is a related table\n tabledata[r.objectid] = {name: r.objectname, fields: []}\n elsif r.objecttype == \"Field\"\n # This is a related field. The parentid points to the table object\n\n # create using the parentid if the parentid is still unknown.\n tabledata[r.parentid] = {name: \"UNKNOWN\", fields: []} unless tabledata.has_key?(r.parentid)\n tabledata[r.parentid][:fields] << r.objectname\n end\n end\n end\n\n # now we need to transform the resulting array-hash to a direct mapping (changed to support older Ruby versions)\n # { tablename => [fieldnames] }\n @tables = {}\n tabledata.each{|k, v| @tables[v[:name]] = v[:fields] }\n end", "def sql_modes; end", "def create_table_structure(columns_to_include)\n if @table_created\n @columns.each do |column|\n begin\n ActiveRecord::Schema.add_column(@new_table_name, column[:name], column[:type]) if (columns_to_include.blank? or columns_to_include.include? 
column[:name])\n rescue\n puts \"Couldnt add field #{column[:name].downcase}\"\n end\n end\n ActiveRecord::Schema.add_column(@new_table_name,\"the_geom\", :geometry,:null => false)\n ActiveRecord::Schema.add_index(@new_table_name,\"the_geom\",:spatial => true)\n end\n end", "def create_table_structure(columns_to_include)\n if @table_created\n @columns.each do |column|\n begin\n ActiveRecord::Schema.add_column(@new_table_name, column[:name], column[:type]) if (columns_to_include.blank? or columns_to_include.include? column[:name])\n rescue\n puts \"Couldnt add field #{column[:name].downcase}\"\n end\n end\n ActiveRecord::Schema.add_column(@new_table_name,\"the_geom\", :geometry,:null => false)\n ActiveRecord::Schema.add_index(@new_table_name,\"the_geom\",:spatial => true)\n end\n end", "def create_likely_qda_data\n ActiveRecord::Base.connection.execute likely_qda_sql\n end", "def columns(table)\r\n tab = @handle.describe_table(table)\r\n cols = tab.columns\r\n cols.collect! do |col|\r\n column_metadata_to_column_info(col)\r\n end\r\n\r\n dbh = DBI::DatabaseHandle.new(self)\r\n\r\n primaries = {}\r\n dbh.select_all(<<EOS, tab.obj_schema, tab.obj_name) do |row|\r\nselect column_name\r\n from all_cons_columns a, all_constraints b\r\n where a.owner = b.owner\r\n and a.constraint_name = b.constraint_name\r\n and a.table_name = b.table_name\r\n and b.constraint_type = 'P'\r\n and b.owner = :1\r\n and b.table_name = :2\r\nEOS\r\n primaries[row[0]] = true\r\n end\r\n\r\n indices = {}\r\n uniques = {}\r\n dbh.select_all(<<EOS, tab.obj_schema, tab.obj_name) do |row|\r\nselect a.column_name, a.index_name, b.uniqueness\r\n from all_ind_columns a, all_indexes b\r\n where a.index_name = b.index_name\r\n and a.index_owner = b.owner\r\n and a.table_owner = :1\r\n and a.table_name = :2\r\nEOS\r\n col_name, index_name, uniqueness = row\r\n indices[col_name] = true\r\n uniques[col_name] = true if uniqueness == 'UNIQUE'\r\n end\r\n\r\n dbh.select_all(<<EOS, tab.obj_schema, tab.obj_name).collect do |row|\r\nselect column_id, column_name, data_default\r\n from all_tab_columns\r\n where owner = :1\r\n and table_name = :2\r\nEOS\r\n col_id, col_name, default = row\r\n\r\n col = cols[col_id.to_i - 1]\r\n col_name = col['name']\r\n\r\n if default && default[0] == ?'\r\n default = default[1..-2].gsub(/''/, \"'\")\r\n end\r\n\r\n col['indexed'] = indices[col_name] || false\r\n col['primary'] = primaries[col_name] || false\r\n col['unique'] = uniques[col_name] || false\r\n col['default'] = default\r\n col\r\n end\r\n rescue OCIException => err\r\n raise_dbierror(err)\r\n end", "def run_single(us)\n #debugger\n initial_data = []\n column_names = us.get_column_names\n num_rows = 1\n \n c = 0\n 0.upto(num_rows - 1) do\n o = OpenStruct.new\n class << o\n attr_accessor :id\n end\n\n #turn the outgoing object into a VO if neccessary\n map = VoUtil.get_vo_definition_from_active_record(us.class.to_s)\n if map != nil\n o._explicitType = map[:outgoing]\n end\n \n #first write the primary \"attributes\" on this AR object\n column_names.each_with_index do |v,k|\n k = column_names[k]\n val = us.send(:\"#{k}\")\n eval(\"o.#{k}=val\")\n end\n \n associations = us.get_associates\n if(!associations.empty?)\n #debugger\n #now write the associated models with this AR\n associations.each do |associate|\n na = associate[1, associate.length]\n ar = us.send(:\"#{na}\")\n\t\n ok = false;\n if (ar.instance_of? Array)\n if (!ar.empty? && !ar.nil?)\n ok=true;\n end\n else\n isArray = true\n end\n\n if (isArray && !ar.nil?) 
|| ok \n\n\t if(use_single?(ar))\n initial_data_2 = run_single(ar) #recurse into single AR method for same data structure\n else\n initial_data_2 = run_multiple(ar) #recurse into multiple AR method for same data structure\n end\n eval(\"o.#{na}=initial_data_2\")\n end\n end\n end\n #\tdebugger\n # if us.single? # apparenty this is not needed since it seems to always return nil :)\n initial_data = o \n # else\n # initial_data << o\n # end\n c += 1\n end\n initial_data\n end", "def select(db); end", "def select(db); end", "def fixup_columns\n @columns.each_index do |idx| \n\n if @columns[idx][:searchable].nil? then\n @columns[idx][:searchable] = @model_class.column_methods_hash[@columns[idx][:id].intern] ? true : false\n end\n @columns[idx][:query] = @columns[idx][:id] if @columns[idx][:query].nil?\n \n if @columns[idx][:sortable].nil? then\n @columns[idx][:sortable] = @columns[idx][:query] == false ? false : true\n end\n \n end\n end", "def write_lobs(table_name, klass, attributes)\n # is class with composite primary key>\n is_with_cpk = klass.respond_to?(:composite?) && klass.composite?\n if is_with_cpk\n id = klass.primary_key.map {|pk| attributes[pk.to_s] }\n else\n id = quote(attributes[klass.primary_key])\n end\n klass.columns.select { |col| col.sql_type =~ /LOB$/i }.each do |col|\n value = attributes[col.name]\n # RSI: changed sequence of next two lines - should check if value is nil before converting to yaml\n next if value.nil? || (value == '')\n value = value.to_yaml if col.text? && klass.serialized_attributes[col.name]\n uncached do\n if is_with_cpk\n lob = select_one(\"SELECT #{col.name} FROM #{table_name} WHERE #{klass.composite_where_clause(id)} FOR UPDATE\",\n 'Writable Large Object')[col.name]\n else\n lob = select_one(\"SELECT #{col.name} FROM #{table_name} WHERE #{klass.primary_key} = #{id} FOR UPDATE\",\n 'Writable Large Object')[col.name]\n end\n @connection.write_lob(lob, value, col.type == :binary)\n end\n end\n end", "def rebuild_depth_cache_sql!\n update_all(\"#{depth_cache_column} = #{ancestry_depth_sql}\")\n end", "def aggregate_db_storage_type; end", "def create_full_rst_tbl(preserve_null_pk = true)\n unless defined? 
@full_rst_tbl\n self.all_cols_select\n self.pk_full_list\n if preserve_null_pk\n renamed_pk_col = @pk_full_list.map { |pk| \"#{pk['col']} as #{pk['alias']}_pk\" }.join(', ')\n else\n renamed_pk_col = @pk_full_list.map do |pk|\n pkcol = @all_cols.find{|col| col.colname == pk['colname'] and col.relname==pk['relname']}\n \"COALESCE(#{pk['col']},#{pkcol.null_replacement}) as #{pk['alias']}_pk\"\n end.join(',')\n end\n targetListReplacement = \"#{renamed_pk_col},#{@all_cols_select}\"\n query = ReverseParseTree.reverseAndreplace(@parseTree, targetListReplacement, '')\n @full_rst_tbl = \"#{@table}_full_rst\"\n pk = @pk_full_list.map { |pk| \"#{pk['alias']}_pk\" }.join(', ')\n # binding.pry\n DBConn.tblCreation(@full_rst_tbl, pk, query)\n\n # unless preserve_null_pk\n # DBConn.update_null_columns(@full_rst_tbl,pk)\n # end\n # if is_plain_query()\n # query = QueryBuilder.create_tbl(@full_rst_tbl, pk, query)\n # DBConn.exec(query)\n # else\n # query = QueryBuilder.create_tbl(@full_rst_tbl, '', query)\n # DBConn.exec(query)\n\n # # not_null_query = pk_list.flat.map{|pk| \"#{pk} is not null\"}.join(' AND ')\n # # add index on not null columns\n # pk_not_null = @pk_full_list.map { |pk| \"#{pk['alias']}_pk is not null\"}.join(' OR ')\n # create_indx = \"CREATE UNIQUE INDEX idx_#{@full_rst_tbl} on #{@full_rst_tbl} (#{pk}) where #{pk_not_null}\"\n # pp create_indx\n # DBConn.exec(create_indx)\n\n # end\n end\n return @full_rst_tbl\n end", "def small_search(relation)\n Post.transaction do\n Post.connection.execute(\"SET LOCAL enable_seqscan = off\")\n Post.connection.execute(\"SET LOCAL enable_indexscan = off\")\n relation.load\n end\n end", "def prepared_sql\n case prepared_type\n when :select, :all, :each\n # Most common scenario, so listed first.\n select_sql\n when :first\n clone(:limit=>1).select_sql\n when :insert_select\n insert_select_sql(*prepared_modify_values)\n when :insert, :insert_pk\n insert_sql(*prepared_modify_values)\n when :update\n update_sql(*prepared_modify_values)\n when :delete\n delete_sql\n else\n select_sql\n end\n end", "def build_database(file, database_user_key,database_movie_key)\n file.each do |line| \n\t\t\ttokens=line.split(\"\\t\")\n\t\t\tuser_id=tokens[0]\n\t\t\tmovie_id=tokens[1]\n\t\t\trate_score=tokens[2]\n\t\t\tadd_entry(database_user_key,user_id,movie_id,rate_score)\n\t\t\tadd_entry(database_movie_key,movie_id,user_id,rate_score)\n end \n end", "def setup_db_index\n self.copy_tables.each do |t|\n no_sql_connection.create_pre_mongified_id_index(t.name)\n end\n end", "def flush_db\n [ 'active_sources', 'data_records', 'semantic_properties', 'semantic_relations', 'workflows'].reverse.each { |f| ActiveRecord::Base.connection.execute \"DELETE FROM #{f}\" }\n # Also remove the \"unsaved cache\" for the wrappers (may be important during testing)\n TaliaCore::SemanticCollectionWrapper.instance_variable_set(:'@unsaved_source_cache', {})\n end", "def lut_write_to_cache_sql_mode(lut_key)\n batch_count = 0\n\n self.where(lut_options(lut_key)[:where]).find_in_batches(:batch_size => lut_options(lut_key)[:batch_size]) do |items| #FIXME not DRY here\n lut = {}\n block = lut_proc(lut_key)\n\n items.each do |item|\n if block\n block.call(lut, item)\n else\n # HACK: doing a merge w/o replacing, just add elements for childs and may transform elements to array\n k = item.send(lut_key)\n v = item.id\n if lut[k]\n v = [v] unless v.respond_to?(:concat)\n lut[k] = [lut[k]] unless lut[k].respond_to?(:concat)\n lut[k].concat(v)\n else\n lut[k] = v\n end\n end\n end\n\n batch_count = 
self.lut_write_cache_item(lut_key, batch_count, lut)\n batch_count += 1\n end\n\n batch_count\n end", "def sql_state; end", "def redisize_sql_metas key, attres\n model_name = key[1]\n primary_key = key[2]\n\n # binding.pry\n attres.map do |attrs|\n metakey = [\"meta\", model_name, primary_key, attrs[primary_key]]\n\n parse_instance_attrs(model_name, attrs, key)\n assign_reverse_key(metakey, key)\n end\n\n parse_sql_key(key)\n end", "def refresh_cache\n self.class.connection.execute \"UPDATE #{self.class.table_name} SET #{ancestors_count_column}=#{self.ancestors.size}, #{descendants_count_column}=#{self.descendants.size} WHERE id=#{self.id}\"\n end", "def to_sql_query_info(offset)\n \"SELECT * FROM #{@model.quoted_table_name} WHERE \" +\n \" #{quote_column(@model.primary_key)} = (($id - #{offset}) / #{ThinkingSphinx.indexed_models.size})\"\n end", "def sql\n <<-SQL\n -- Search learning paths\n SELECT DISTINCT\n c.id,\n c.name,\n c.course_code,\n c.settings,\n cc.content,\n 'learning_path' AS content_type,\n c.id AS learning_path_id,\n 0 AS learning_objective_id\n FROM courses c\n LEFT OUTER JOIN fearless_taggings ts\n ON ts.taggable_id = c.id AND ts.taggable_type = 'LearningPath'\n LEFT OUTER JOIN fearless_tags t\n ON t.id = ts.tag_id\n LEFT OUTER JOIN fearless_custom_contents cc\n ON cc.contentable_id = c.id AND cc.contentable_type = 'LearningPath'\n WHERE 0=0\n #{construct_account_clause}\n #{construct_course_worklow_clause}\n #{construct_name_sql}\n #{construct_all_tags_search('t', 'name')}\n UNION ALL\n -- Search learning objectives\n SELECT DISTINCT\n cm.id,\n cm.name,\n c.course_code,\n c.settings,\n cc.content,\n 'learning_objective' AS content_type,\n cm.context_id::bigint AS learning_path_id,\n cm.id::bigint AS learning_objective_id\n FROM context_modules cm\n INNER JOIN courses c\n ON c.id = cm.context_id\n AND cm.context_type = 'Course'\n LEFT OUTER JOIN fearless_taggings ts\n ON ts.taggable_id = cm.id AND ts.taggable_type = 'LearningObjective'\n LEFT OUTER JOIN fearless_tags t\n ON t.id = ts.tag_id\n LEFT OUTER JOIN fearless_custom_contents cc\n ON cc.contentable_id = cm.id AND cc.contentable_type = 'LearningObjective'\n WHERE 0=0\n #{construct_account_clause}\n #{construct_generic_workflow_clause('cm')}\n #{construct_name_sql('cm')}\n #{construct_all_tags_search('t', 'name')}\n UNION ALL\n -- Search learning learning_event\n SELECT DISTINCT\n ct.id,\n ct.title AS name,\n c.course_code,\n c.settings,\n cc.content,\n 'learning_event' AS content_type,\n ct.context_id::bigint AS learning_path_id,\n ct.context_module_id::bigint AS learning_objective_id\n FROM content_tags ct\n INNER JOIN courses c\n ON c.id = ct.context_id\n AND ct.context_type = 'Course'\n LEFT OUTER JOIN fearless_taggings ts\n ON ts.taggable_id = ct.id AND ts.taggable_type = 'LearningEvent'\n LEFT OUTER JOIN fearless_tags t\n ON t.id = ts.tag_id\n LEFT OUTER JOIN fearless_custom_contents cc\n ON cc.contentable_id = ct.id AND cc.contentable_type = 'LearningEvent'\n WHERE 0=0\n #{construct_account_clause}\n #{construct_generic_workflow_clause('ct')}\n #{construct_name_sql('ct', 'title')}\n #{construct_all_tags_search('t', 'name')}\n SQL\n end", "def fast_each(options)\n i=minimum(\"#{table_name}.#{primary_key}\", options)\n \n # not all the backends always sort primay_key columns so do it manuall\n options.update(:order => \"#{table_name}.#{primary_key} ASC\")\n \n i=minimum(\"#{table_name}.#{primary_key}\", options) or return\n # first the first object by id\n yield(o=find_one(i, {}))\n # as long as we keep finding 
objects, keep going\n while o\n with_scope(:find => {:conditions => [ \"#{table_name}.#{primary_key} > ?\", i]} ) do\n if o=find_initial(options)\n i=o.send primary_key\n yield(o) \n end\n end\n end\n end", "def late_materialization(db)\n part_result_1 = []\n part_result_2 = []\n db[\"country\"][\"dic\"].each_with_index do |v, i|\n if v == \"GER\" \n db[\"country\"][\"av\"].each.with_index do |val, ind|\n if val == i then part_result_1.push(ind) end\n end\n break\n end\n end\n db[\"gender\"][\"dic\"].each_with_index do |v, i|\n if v == \"M\" \n db[\"gender\"][\"av\"].each.with_index do |val, ind|\n if val == i then part_result_2.push(ind) end\n end\n break\n end\n end \n # with this returned array we can materialize the\n # row the length of the array is the aggregation\n # in this case the materialiation is not neccessarry\n (part_result_1 & part_result_2)\nend", "def each_for_write\n \t \tbegin\n\t \t \thelper = TransHelper.new\n\t \t \thelper.trans([:Layer]) do |tr,db,tables|\n\t \t \t \tself.each do |sel_object|\n\t \t \t \t\tent = helper.get_obj(sel_object.ObjectId, :Read)\n\t \t \t \t\tlayer = helper.get_obj(ent.LayerId)\n\t \t \t \t\tif !layer.IsLocked\n\t \t \t \t\t\tent.UpgradeOpen\n\t \t \t \t \tyield ent \n\t \t \t \t end\t\n\t \t \t \tend\n\t \t \tend\n \t rescue Exception => e\n\t\t puts_ex e\n\t \t end\t\n \t end", "def initialize_db_schema\n @db.exec(\n 'create table if not exists nodes\n (\n id SERIAL PRIMARY KEY,\n host VARCHAR(256) UNIQUE,\n last_seen TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n current_report INTEGER\n );\n\n create table if not exists collections\n (\n id SERIAL PRIMARY KEY,\n collection VARCHAR(256) NOT NULL,\n archived BOOL DEFAULT FALSE\n );\n\n create table if not exists reports\n (\n id SERIAL PRIMARY KEY,\n node_id INTEGER NOT NULL,\n file_handle INTEGER,\n status INTEGER NOT NULL,\n collection INTEGER NOT NULL,\n time TIMESTAMP NOT NULL,\n resources_changed INTEGER NOT NULL,\n resources_failed INTEGER NOT NULL,\n resources_total INTEGER NOT NULL,\n runtime REAL NOT NULL,\n new_report BOOL DEFAULT FALSE,\n FOREIGN KEY (node_id) REFERENCES nodes (id),\n FOREIGN KEY (collection) REFERENCES collections(id)\n );\n\n create table if not exists schemaversion\n (\n id SERIAL PRIMARY KEY,\n complete BOOL DEFAULT FALSE,\n comment VARCHAR(256) NOT NULL\n );\n create table if not exists reportdata\n (\n id SERIAL PRIMARY KEY,\n report bytea\n );\n '\n )\n end", "def columns\n Proc.new do\n <<-SQL\n id integer primary key autoincrement,\n sample_id integer, --foreign key to reference the original revision\n \n --these are all true contemporaneous of the edit, post or pre-edit may be different\n account_creation timestamp(20), --this should be the entry in the logevents call, but if we exceed the max number of requests, we won't get it\n account_lifetime integer, --this is the lifetime of the account in seconds\n edits_last_second integer, --want a figure to show recent activity do buckets instead\n edits_last_minute integer,\n edits_last_hour integer,\n edits_last_day integer,\n edits_last_week integer,\n edits_last_month integer,\n edits_last_year integer,\n total_edits integer,\n --rights_grant_count \n --rights_removal_count\n --groups string,\n FOREIGN KEY(sample_id) REFERENCES irc_wikimedia_org_en_wikipedia(id) --these foreign keys probably won't be enforced b/c sqlite doesn't include it by default--TODO this foreign table name probably shouldn't be hard coded\nSQL\n end\n end", "def index\n # @schemas = Schema.where(default: false)\n\n ####CLEANUP 
FUNCTION - SUPER QUICK###\n orphans = Schema.where(disabled: false).where([ \"id NOT IN (?)\", Account.select(:schema_id).where(banned: false, created: true)])\n orphans.update_all(disabled: true)\n\n @schemas = Schema.ordered_by_use\n\n end", "def scaffold_get_objects(options)\n optionshash = {}\n data = self.all\n if options[:conditions]\n conditions = options[:conditions]\n if conditions && Array === conditions && conditions.length > 0\n if String === conditions[0]\n data = data.all(:conditions => conditions)\n else\n conditions.each do |cond|\n next if cond.nil?\n data = case cond\n when Hash, String then data.all(:conditions => [cond.gsub(\"NULL\",\"?\"),nil])\n when Array then \n if cond.length==1\n data.all(:conditions => [cond[0].gsub(\"NULL\",\"?\"),nil])\n else\n data.all(:conditions => cond)\n end\n when Proc then data.all(&cond)\n end\n end\n end\n end\n end\n slice = nil\n if options[:limit]\n startpos = options[:offset] || 0\n endpos = options[:limit]\n slice = [startpos,endpos]\n end\n # TODO includes break SQL generation\n # optionshash[:links] = options[:include] if options[:include]\n # optionshash[:links] = [optionshash[:links]] unless optionshash[:links].is_a?(Array)\n if options[:order] then\n optionshash[:order] = get_ordering_options(options[:order])\n end\n if slice then\n q = data.all(optionshash).slice(*slice)\n else\n q = data.all(optionshash)\n end\n #p repository.adapter.send(\"select_statement\",q.query)\n q.to_a\n end", "def prepare_schema\n begin\n ActiveRecord::Schema.define do\n # a twobot instance\n create_table :twobots do |t|\n t.string :name\n t.string :status, :null => false, :default => \"active\"\n end\n add_index :twobots, :name\n \n # has many twitter searches\n create_table :searches do |t|\n t.integer :twobot_id, :null => false\n t.string :query, :null => false\n t.integer :last_twid, :null => false, :default => 0\n t.timestamp :last_run\n t.integer :last_result_count\n t.integer :total, :default => 0\n end\n add_index :searches, :query\n \n # a search has many actions\n create_table :actions do |t|\n t.integer :search_id, :null => false\n t.text :code\n end\n \n # cache of tweets\n create_table :tweets do |t|\n t.integer :twid, :null => false\n t.string :from_user\n t.string :to_user\n t.integer :from_user_id\n t.integer :to_user_id\n t.string :text\n t.string :profile_image_url\n t.timestamp :created_at\n end\n add_index :tweets, :twid\n end\n rescue\n end\n end", "def copy_structure\n logger.info \"Copying structure for table #{name} from watched to audit database\"\n db.query(\"CREATE TABLE #{audit} LIKE #{watched}\")\n add_copied_at_field\n add_has_delta_field\n add_last_version_field\n add_has_violation_field\n add_deletion_flag\n end", "def get_all_entries()\n jso = Hash.new()\n \n # Connect to database\n dbe = MIDB::API::Dbengine.new(@engine.config, @db)\n dblink = dbe.connect()\n rows = dbe.query(dblink, \"SELECT * FROM #{self.get_structure.values[0].split('/')[0]};\")\n if rows == false\n return MIDB::Interface::Server.json_error(400, \"Bad Request\")\n end\n # Iterate over all rows of this table\n rows.each do |row|\n jso[row[\"id\"]] = self.get_structure\n self.get_structure.each do |name, dbi|\n table = dbi.split(\"/\")[0]\n field = dbi.split(\"/\")[1]\n # Must-match relations (\"table2/field/table2-field->row-field\")\n if dbi.split(\"/\").length > 2\n match = dbi.split(\"/\")[2]\n matching_field = match.split(\"->\")[0]\n row_field = match.split(\"->\")[1]\n query = dbe.query(dblink, \"SELECT #{field} FROM #{table} WHERE 
#{matching_field}=#{row[row_field]};\")\n else\n query = dbe.query(dblink, \"SELECT #{field} from #{table} WHERE id=#{row['id']};\")\n end\n if query == false\n return MIDB::Interface::Server.json_error(400, \"Bad Request\")\n end\n jso[row[\"id\"]][name] = dbe.length(query) > 0 ? dbe.extract(query,field) : \"unknown\"\n jso[row[\"id\"]][name] = @hooks.format_field(name, jso[row[\"id\"]][name])\n end\n end\n @hooks.after_get_all_entries(dbe.length(rows))\n return jso\n end", "def save_columns!\n # NetzkeFieldList.update_list_for_current_authority(global_id, columns(false), original_data_class.name) if persistent_config_enabled?\n end", "def static_sql_bytesize\n @static_sql_bytesize ||= insert_part.bytesize + update_part.bytesize + 2\n end", "def initDb\n @db.create_table! :tasks do\n primary_key :id\n Integer :created\n Integer :changed\n Integer :wake\n Integer :completed\n Integer :status\n String :title\n String :path\n String :data, :text=>TRUE\n end\n\n @db.create_table! :locks do\n Integer :id, :primary_key=>TRUE\n Integer :locked\n end\n end", "def orm_patches_applied; end", "def initialize_flag_definitions_table\n # start drop this later\n if connection.tables.include?(FlagDefTableName) && !connection.columns(FlagDefTableName).detect{|x|x.name=='id'}\n connection.drop_table FlagDefTableName\n end\n # end drop this later\n \n unless connection.tables.include?(FlagDefTableName)\n connection.create_table FlagDefTableName.to_sym do |t|\n t.column \"model\", :string, :limit => 100\n t.column \"position\", :integer\n t.column \"flag_name\", :string, :limit => 100\n t.column \"flag_type\", :text, :limit => 6\n t.column \"default\", :boolean, :default => false\n end\n connection.add_index FlagDefTableName, [\"model\"], :name => \"by_model\"\n puts \"Flag definitions table created...OK\"\n else\n #puts \"Flag definitions table found...OK\"\n end\n end", "def create_entity_columns\n entity = self.name.constantize\n entity.reset_column_information\n \n entity.columns.each_with_index do |column, i|\n f = self.entity_columns.detect{ |col| col.name == column.name }\n unless f\n f = self.entity_columns.build(:name => column.name)\n association = entity.reflect_on_all_associations(:belongs_to).detect{|a| a.options[:foreign_key] == column.name }\n association ||= entity.reflect_on_association column.name.sub(/_id$/, '').to_sym if column.name.ends_with? '_id'\n if association\n f.ref_type = :Entity.to_s\n f.ref_name = association.options[:polymorphic] ? 'POLYMORPHIC' : association.class_name\n end\n end\n \n f.term = \"label.#{column.name}\" unless f.term\n f.col_type = column.type.to_s\n f.col_size = column.limit if(column.respond_to?(:limit))\n f.nullable = column.null\n f.def_val = (column.default == nil) ? 
nil : column.default.to_s\n f.disp_rank = i * 10\n f.save!\n end\n\n uniq_index_def = ActiveRecord::Base.connection.indexes(entity.table_name).find { |index_def| index_def.unique == true }\n \n # unique ranks by getting unique index\n if(uniq_index_def)\n uniq_seq = 1\n uniq_index_def.columns.each do |uniq_col_name|\n column = self.entity_columns.where(\"name = ?\", uniq_col_name).first\n if(column)\n column.uniq_rank = uniq_seq * 10\n column.save!\n uniq_seq += 1\n end\n end\n end\n \n return self.entity_columns\n end", "def clean_db\n\n puts \"Caching wikipedia links\"\n @wikipedia_cache = {}\n Artist.all\n .where('wikilink IS NOT NULL')\n .pluck( 'name' , 'wikilink' )\n .each { |result| @wikipedia_cache[ key_name(result[0]) ] = result[1] }\n\n puts \"Cleaning db\"\n PlayerState.delete_all\n PlayListSong.delete_all\n Song.delete_all\n Album.delete_all\n Artist.delete_all\n end", "def create_tables_mysql()\n\n\t\tdbrows = @database.query <<-SQL\n\t\tCREATE TABLE IF NOT EXISTS invoices (\n\t\t\torganization VARCHAR(255),\n\t\t\tupdated DATE,\n\t\t\tamount FLOAT,\n\t\t\tamount_outstanding FLOAT,\n\t\t\tdiscount FLOAT,\n\t\t\tinvoice_id VARCHAR(255),\n\t\t\tnumber VARCHAR(255) UNIQUE PRIMARY KEY,\n\t\t\tmatter VARCHAR(255),\n\t\t\tstatus VARCHAR(255),\n\t\t\tdate DATE\n\t\t\t);\n\t\t\tSQL\n\n\t\t\tdbrows = @database.query <<-SQL\n\t\t\tCREATE TABLE IF NOT EXISTS invoice_lines (\n\t\t\t\tline_id INTEGER,\n\t\t\t\t_order INTEGER,\n\t\t\t\tnumber VARCHAR(255),\n\t\t\t\tinvoice_id VARCHAR(255),\n\t\t\t\tdescription TEXT,\n\t\t\t\tamount FLOAT,\n\t\t\t\tfirst_expense_id INTEGER,\n\t\t\t first_time_entry_id TEXT,\n\t\t\t\tline_item_date VARCHAR(255),\n\t\t\t\tperson VARCHAR(512),\n\t\t\t\tname VARCHAR(255),\n\t\t\t\tunit_cost FLOAT,\n\t\t\t\tquantity FLOAT,\n\t\t\t\ttype VARCHAR(255),\n\t\t\t\tmatter VARCHAR(255),\n\t\t\t\tupdated DATE\n\t\t\t\t);\n\t\t\t\tSQL\n\t\t## Add code here if you end up making additional tables\n\n\t\tdbrows = @database.query <<-SQL\n\t\tCREATE TABLE IF NOT EXISTS projects (\n\t\t\tmatter VARCHAR(255) UNIQUE,\n\t\t\tname VARCHAR(255),\n\t\t\tproject_id VARCHAR(255),\n\t\t\thour_budget FLOAT\n\t\t\t);\n\t\t\tSQL\n\t\tdbrows = @database.query('DELETE FROM projects;')\n\n\t\tdbrows = @database.query <<-SQL\n\t\tCREATE TABLE IF NOT EXISTS time_entries (\n\t\t\ttime_entry_id INTEGER UNIQUE,\n\t\t\tstaff_id INTEGER,\n\t\t\tproject_id VARCHAR(255),\n\t\t\ttask_id INTEGER,\n\t\t\thours FLOAT,\n\t\t\tdate DATE,\n\t\t\tnotes TEXT,\n\t\t\tbilled INTEGER\n\t\t);\n\t\tSQL\n\t\tdbrows = @database.query('DELETE FROM time_entries;')\n\n\t\tdbrows = @database.query <<-SQL\n\t\tCREATE TABLE IF NOT EXISTS staff (\n\t\t\tperson VARCHAR(512),\n\t\t\tfirst_name VARCHAR(255),\n\t\t\tlast_name VARCHAR(255),\n\t\t\tstaff_id INTEGER,\n\t\t\trate FLOAT\n\t\t);\n\t\tSQL\n\t\tdbrows = @database.query('DELETE FROM staff;')\n\n\t\tdbrows = @database.query <<-SQL\n\t\tCREATE TABLE IF NOT EXISTS contractor (\n\t\t\tname VARCHAR(255),\n\t\t\tcontractor_id INTEGER,\n\t\t\trate FLOAT\n\t\t);\n\t\tSQL\n\t\tdbrows = @database.query('DELETE FROM contractor;')\n\n\t\tdbrows = @database.query <<-SQL\n\t\t\t-- estimates from Contractors\n\t\t\tCREATE OR REPLACE VIEW unbilled_contractor_time AS\n\t\t\t\tSELECT projects.matter, contractor.name as person, SUM(time_entries.hours * contractor.rate) as estimated\n\t\t\t\tFROM time_entries\n\t\t\t\tINNER JOIN projects ON projects.project_id = time_entries.project_id\n\t\t\t\tINNER JOIN contractor ON contractor.contractor_id = time_entries.staff_id\n\t\t\t\tWHERE 
projects.matter <> '' AND time_entries.billed = 0\n\t\t\t\tGROUP BY projects.matter, person\n\t\t\t;\n\t\t\tSQL\n\n\t\t\t#UNBILLED STAFF TIME view\n\t\tdbrows = @database.query <<-SQL\n\t\t-- Estimates from Staff\n\t\t\tCREATE OR REPLACE VIEW unbilled_staff_time AS\n\t\t\t\t\tSELECT projects.matter, staff.person, SUM(time_entries.hours * staff.rate) as estimated\n\t\t\t\t\tFROM time_entries\n\t\t\t\t\tINNER JOIN projects ON projects.project_id = time_entries.project_id\n\t\t\t\t\tINNER JOIN staff ON staff.staff_id = time_entries.staff_id\n\t\t\t\t\tWHERE projects.matter <> '' AND time_entries.billed = 0\n\t\t\t\t\tGROUP BY projects.matter, staff.person\n\t\t\t\t\tORDER BY projects.matter, staff.person\n\t\t\t\t\t;\n\n\t\t\tSQL\n\n\t\t#UNBILLED ALL TIME TEMP view\n\t\tdbrows = @database.query <<-SQL\n\t\t\tCREATE OR REPLACE VIEW unbilled_all_time_temp AS\n\t\t\t\t\tSELECT * FROM unbilled_staff_time\n\t\t\t\t\tUNION\n\t\t\t\t\tSELECT * FROM unbilled_contractor_time;\n\t\t\tSQL\n\n\t\t#UNBILLED ALL TIME view\n\t\tdbrows = @database.query <<-SQL\n\t\t\t-- All unbilled time\n\t\t\t\tCREATE OR REPLACE VIEW unbilled_all_time AS\n\t\t\t\t\tSELECT *\n\t\t\t\t\tFROM unbilled_all_time_temp\n\t\t\t\t\tGROUP BY unbilled_all_time_temp.matter, unbilled_all_time_temp.person\n\t\t\t\t;\n\t\t\t\tSQL\n\n\t\t#mysql version\n\t\t#INVOICES GROUP view\n\t\tdbrows = @database.query <<-SQL\n\t\t\tCREATE OR REPLACE VIEW invoices_grouped AS\n\t\t\t\t\tSELECT\n\t\t\t\t\t\t\tinvoices.organization\n\t\t\t\t\t\t, invoices.matter\n\t\t\t\t\t\t, SUM((CASE WHEN status <> 'draft' THEN invoices.amount ELSE 0 END)) AS lifetime_billed\n\t\t\t\t\t\t, SUM((CASE WHEN YEAR(invoices.date) = YEAR(NOW()) THEN\n\t\t\t\t\t\t\t(CASE WHEN status <> 'draft' THEN invoices.amount ELSE 0 END)\n\t\t\t\t\t\t\tELSE 0 END)) as ytd_billed\n\t\t\t\t\t\t, SUM((CASE WHEN status = 'draft' THEN invoices.amount ELSE 0 END)) as draft_invoices\n\t\t\t\t\t\t, SUM((CASE WHEN status <> 'draft' THEN invoices.amount_outstanding ELSE 0 END)) as outstanding\n\t\t\t\t\tFROM invoices\n\t\t\t\t\tGROUP BY invoices.organization, invoices.matter;\n\t\t\tSQL\n\n\t\t#ORGNAMES view\n\t\tdbrows = @database.query <<-SQL\n\t\t\tCREATE OR REPLACE VIEW orgnames AS\n\t\t\t\tSELECT DISTINCT invoices_grouped.organization, unbilled_all_time.matter\n\t\t\t\tFROM unbilled_all_time\n\t\t\t\tLEFT OUTER JOIN invoices_grouped ON SUBSTR(invoices_grouped.matter,1,4) = SUBSTR(unbilled_all_time.matter,1,4);\n\t\t\tSQL\n\n\t\tdbrows = @database.query <<-SQL\n\t\t\t-- Build a deconstructed invoice from the line items\n\t\t\tCREATE OR REPLACE VIEW apportioned_invoices AS\n\t\t\t\t\t\tSELECT\n\t\t\t\t\t\t\t\tinvoices.organization\n\t\t\t\t\t\t\t, invoices.updated\n\t\t\t\t\t\t\t, (invoice_lines.amount - (invoice_lines.amount * invoices.discount/100)) as amount\n\t\t\t\t\t\t\t, (invoices.amount_outstanding/invoices.amount) * (invoice_lines.amount - (invoice_lines.amount * invoices.discount/100)) as amount_outstanding\n\t\t\t\t\t\t\t, invoices.discount\n\t\t\t\t\t\t\t, invoices.invoice_id\n\t\t\t\t\t\t\t, invoices.number\n\t\t\t\t\t\t\t, invoices.matter\n\t\t\t\t\t\t\t, invoices.status\n\t\t\t\t\t\t\t, invoices.date\n\t\t\t\t\t\t\t, (CASE WHEN invoice_lines.person = '' OR invoice_lines.person IS NULL THEN 'no name found' ELSE invoice_lines.person END) as person\n\t\t\t\t\t\t\t, invoice_lines.type\n\t\t\t\t\t\tFROM invoice_lines\n\t\t\t\t\t\tLEFT OUTER JOIN invoices ON invoices.invoice_id = invoice_lines.invoice_id\n\t\t\t\t\t\t;\n\t\t\tSQL\n\n\t\tdbrows = @database.query <<-SQL\n\t\t\tCREATE OR 
REPLACE VIEW apportioned_invoices_grouped AS\n\t\t\t\t\tSELECT\n\t\t\t\t\t\t\tapportioned_invoices.organization\n\t\t\t\t\t\t, apportioned_invoices.matter\n\t\t\t\t\t\t, apportioned_invoices.person\n\t\t\t\t\t\t, SUM((CASE WHEN apportioned_invoices.status <> 'draft' THEN apportioned_invoices.amount ELSE 0 END)) AS lifetime_billed\n\t\t\t\t\t\t, SUM((CASE WHEN YEAR(apportioned_invoices.date) = YEAR(NOW()) THEN\n\t\t\t\t\t\t\t(CASE WHEN apportioned_invoices.status <> 'draft' THEN apportioned_invoices.amount ELSE 0 END)\n\t\t\t\t\t\t\tELSE 0 END)) as ytd_billed\n\t\t\t\t\t\t, SUM((CASE WHEN apportioned_invoices.status = 'draft' THEN apportioned_invoices.amount ELSE 0 END)) as draft_invoices\n\t\t\t\t\t\t, SUM(unbilled_all_time.estimated) AS estimated\n\t\t\t\t\t\t, SUM((CASE WHEN apportioned_invoices.status <> 'draft' THEN apportioned_invoices.amount_outstanding ELSE 0 END)) as outstanding\n\t\t\t\t\tFROM apportioned_invoices\n\t\t\t\t\tLEFT OUTER JOIN unbilled_all_time ON apportioned_invoices.matter = unbilled_all_time.matter AND apportioned_invoices.person = unbilled_all_time.person\n\t\t\t\t\tGROUP BY apportioned_invoices.organization, apportioned_invoices.matter, apportioned_invoices.person;\n\t\t\t\t\t;\n\t\t\t\tSQL\n\n\t\t#UNBILLED ALL TIME WITH ORGS view\n\t\tdbrows = @database.query <<-SQL\n\t\t\t-- need to fix up matters\n\t\t\tCREATE OR REPLACE VIEW unbilled_all_time_with_orgs AS\n\t\t\t\t\tSELECT (CASE WHEN orgnames.organization <> '' THEN orgnames.organization ELSE 'unknown organization' END) as organization, unbilled_all_time.person, unbilled_all_time.matter, unbilled_all_time.estimated\n\t\t\t\t\tFROM unbilled_all_time\n\t\t\t\t\tLEFT OUTER JOIN orgnames ON orgnames.matter = unbilled_all_time.matter\n\t\t\t\t;\n\t\t\tSQL\n\n\t\tdbrows = @database.query <<-SQL\n\t\t\tCREATE OR REPLACE VIEW dashboard_detailed AS\n\t\t\t\t\tSELECT\n\t\t\t\t\t\t\tapportioned_invoices_grouped.organization\n\t\t\t\t\t\t, apportioned_invoices_grouped.matter\n\t\t\t\t\t\t, apportioned_invoices_grouped.person\n\t\t\t\t\t\t, apportioned_invoices_grouped.lifetime_billed\n\t\t\t\t\t\t, apportioned_invoices_grouped.ytd_billed\n\t\t\t\t\t\t, apportioned_invoices_grouped.outstanding\n\t\t\t\t\t\t, apportioned_invoices_grouped.draft_invoices\n\t\t\t\t\t\t, (CASE WHEN unbilled_all_time_with_orgs.estimated IS NULL THEN 0 ELSE unbilled_all_time_with_orgs.estimated END) as estimated\n\t\t\t\t\t\t, (apportioned_invoices_grouped.lifetime_billed + apportioned_invoices_grouped.draft_invoices + (CASE WHEN unbilled_all_time_with_orgs.estimated IS NULL THEN 0 ELSE unbilled_all_time_with_orgs.estimated END)) as all_spend\n\t\t\t\t\tFROM apportioned_invoices_grouped\n\t\t\t\t\tLEFT OUTER JOIN unbilled_all_time_with_orgs ON unbilled_all_time_with_orgs.matter = apportioned_invoices_grouped.matter AND unbilled_all_time_with_orgs.person = apportioned_invoices_grouped.person\n\t\t\t\t\tUNION\n\t\t\t\t\t-- Find the other half of the FULL OUTER JOIN\n\t\t\t\t\tSELECT\n\t\t\t\t\t\t\tunbilled_all_time_with_orgs.organization\n\t\t\t\t\t\t, unbilled_all_time_with_orgs.matter\n\t\t\t\t\t\t, unbilled_all_time_with_orgs.person\n\t\t\t\t\t\t, 0 as lifetime_billed\n\t\t\t\t\t\t, 0 as ytd_billed\n\t\t\t\t\t\t, 0 as outstanding\n\t\t\t\t\t\t, 0 as draft_invoices\n\t\t\t\t\t\t, unbilled_all_time_with_orgs.estimated\n\t\t\t\t\t\t, unbilled_all_time_with_orgs.estimated as all_spend\n\t\t\t\t\tFROM unbilled_all_time_with_orgs\n\t\t\t\t\tLEFT OUTER JOIN apportioned_invoices_grouped ON apportioned_invoices_grouped.matter = 
unbilled_all_time_with_orgs.matter AND apportioned_invoices_grouped.person = unbilled_all_time_with_orgs.person\n\t\t\t\t\tWHERE apportioned_invoices_grouped.matter IS NULL;\n\t\t\t\t;\n\t\t\tSQL\n\n\n\n\t\t\t#DASHBOARD view\n\t\t\tdbrows = @database.query <<-SQL\n\t\t\t\tCREATE OR REPLACE VIEW dashboard AS\n\t\t\t\t\tSELECT\n\t\t\t\t\t \torganization\n\t\t\t\t\t , matter\n\t\t\t\t\t , SUM(lifetime_billed) as lifetime_billed\n\t\t\t\t\t , SUM(ytd_billed) as ytd_billed\n\t\t\t\t\t , SUM(outstanding) as outstanding\n\t\t\t\t\t , SUM(draft_invoices) as draft_invoices\n\t\t\t\t\t , SUM(estimated) as estimated\n\t\t\t\t\t , SUM(all_spend) as all_spend\n\t\t\t\t\tFROM dashboard_detailed\n\t\t\t\t\tGROUP BY matter\n\t\t\t\t;\n\t\t\tSQL\n\tend", "def build_sql(field)\n statement_sql = ''\n keys = primary_keys(field)\n puts \" Altering #{keys.length} records for: #{field.name} => #{field.output_type}\".blue\n keys.each do |primary_key|\n record_sql = \"UPDATE #{field.table} \"\n record_sql += \"SET #{field.column} = '#{out_val(field)}' \"\n record_sql += \"#{where_and(record_sql)} #{field.primary_key_col} = #{primary_key};\\n\"\n statement_sql += record_sql\n end \n statement_sql\nend", "def skip_schema_queries=(_arg0); end" ]
[ "0.593455", "0.593455", "0.5806628", "0.57233053", "0.56322974", "0.5581713", "0.5546168", "0.553777", "0.5522078", "0.5470438", "0.5452566", "0.54294217", "0.5413984", "0.54117346", "0.5408922", "0.5405584", "0.53872675", "0.53866017", "0.5379075", "0.53425556", "0.5318195", "0.528113", "0.5272989", "0.5266397", "0.5266316", "0.5266316", "0.526217", "0.52601546", "0.52358013", "0.5228528", "0.5220914", "0.5207856", "0.52070415", "0.51966345", "0.51966345", "0.51966345", "0.51966345", "0.517771", "0.5160405", "0.51538086", "0.51537734", "0.51508474", "0.51474226", "0.5136519", "0.51319206", "0.5129033", "0.512335", "0.51207614", "0.5119382", "0.5111766", "0.51076204", "0.5102765", "0.5101811", "0.5094169", "0.5084078", "0.5081753", "0.50804913", "0.5071421", "0.5071421", "0.5060478", "0.50519013", "0.5049347", "0.50457084", "0.50457084", "0.503779", "0.50355387", "0.5029952", "0.5018288", "0.50102574", "0.50091183", "0.50040656", "0.50007915", "0.4995596", "0.4988581", "0.49793324", "0.4973513", "0.49723363", "0.49670577", "0.49616435", "0.4961389", "0.49448645", "0.49443817", "0.49424592", "0.49368915", "0.49307954", "0.49301973", "0.4926971", "0.49262", "0.4925692", "0.49251553", "0.4920276", "0.49185053", "0.4912442", "0.49102128", "0.49066177", "0.4905179", "0.4903585", "0.48987418", "0.48981625", "0.48932686" ]
0.5702872
4
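The dashboard views in the record above emulate a FULL OUTER JOIN, which MySQL lacks, by UNION-ing a LEFT OUTER JOIN with its mirror-image join restricted to unmatched rows (the "-- Find the other half" comment marks that branch). A minimal sketch of the pattern in the same @database.query style; the tables a and b and the key k are hypothetical, not taken from the record:

@database.query <<-SQL
  SELECT a.k, a.val AS a_val, b.val AS b_val
  FROM a
  LEFT OUTER JOIN b ON b.k = a.k
  UNION
  -- the other half: rows that exist only in b
  SELECT b.k, NULL AS a_val, b.val AS b_val
  FROM b
  LEFT OUTER JOIN a ON a.k = b.k
  WHERE a.k IS NULL
SQL

The WHERE a.k IS NULL filter stops the second branch from re-emitting rows the first branch already covered, which is exactly what the dashboard_detailed view does with its IS NULL predicate.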
Register a new Word entry
def insert_word(word)
  word = word.clone
  word_id = word.id

  @index_sense ||= @database.prepare('insert into senses_fts values (?, ?)')
  @index_literal ||= @database.prepare('insert into literals_fts values (?, ?, ?)')
  @insert_word ||= @database.prepare('insert into words values (?, ?)')

  word.literals.each do |literal|
    @index_literal.execute(literal.text, word_id, literal.priority)
  end

  word.readings.each do |reading|
    @index_literal.execute(reading.text, word_id, reading.priority)
  end

  word.senses.each do |sense|
    @index_sense.execute(sense.texts.join(';'), word_id)
  end

  # The id is recoverable from the table's id column, so blank it before serializing.
  word.id = 0

  @insert_word.execute(word_id, Word.encode(word))
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_word(word)\n \n end", "def add_word(word)\r\n \r\n end", "def add_word(word, definition)\r\n @word << {Word: word, Definition: definition}\r\n # puts \"This is @word: #{@word}\"\r\n end", "def add(word, w)\n if word != \"\" \n cur = self\n word.downcase.each_char do |character|\n modified_char = @@vowels.include?(character) ? '*' : character\n cur.kids[modified_char] = SpellingTrie.new if not cur.kids.has_key? modified_char\n cur = cur.kids[modified_char]\n end\n cur.words_here.add(w)\n end\n end", "def add(entry) \r\n if entry.is_a? String\r\n @@words[entry] = nil\r\n else \r\n learn = entry.to_a \r\n learnit = learn[0]\r\n @@words[learnit[0]] = learnit[1]\t\r\n end\r\nend", "def add(word)\n\t\tif word.class == String\n\t\t\tword = {word => nil}\n\t\tend\n\n\t\tword.each do |key, value|\n\t\t\t@entries[key] = value\n\t\tend\n\tend", "def add(word)\n end", "def save_word(word)\n id = MinWords::DB[:words].insert word_text: word[:word_text]\n save_definition(id, word[:definition_text])\n id\n end", "def add(word)\n Hunspell.Hunspell_add(self,word.to_s)\n end", "def create\n word = Word.new(params['term'], false)\n\n # check to see if the word exists on the board\n board = Boggle.new\n exists = board.search(word.term, params['tiles'])\n\n # now, see if the word is an actual English word\n if exists\n\n # check the cache first - only go to the oxford dictionary if\n # it doesn't exist in our simple word cache\n word_cache_store = MiniCache::Store.new\n if !word_cache_store.get(word.term).nil?\n word.exists = true\n else\n # allowing exceptions from underlying api to throw 500 status code\n # and generate a log entry - to protect system from downstream\n # latency and failures, use circuit breaker\n word.exists = @dictionary_gateway.exists(word.term)\n word_cache_store.set(word.term, '') if word.exists\n\n end\n end\n\n render json: word\n end", "def add(word)\n change_wordlist(@words + [ Word.new(word) ] )\n end", "def add_word(word)\n chars = word.chars\n current = @root\n\n chars.each do |c|\n current = add_character(c, current)\n current.word_count += 1\n end\n current.is_word = true\n end", "def create\n @word = Word.find_or_create_by_text(params[:text])\n\n respond_to do |format|\n if @word.save\n format.json { render json: @word, status: :created,\n location: @word}\n else\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_word(word)\n node = root\n word.chars.each do |c|\n node.children[c] = TrieNode.new unless node.children.key?(c)\n node = node.children[c]\n end\n node.word = true\n end", "def add_word(word)\n @word_counts[word] ||= 0\n current_count = @word_counts[word] += 1\n @total_word_count += 1\n # If this word is new for this Group, it might be new for the entire Groupie\n @groupie.add_word(word) if current_count == 1\n nil\n end", "def add_word word #Function shovels individual strings into the dictionary array\n @dictionary << word\n end", "def new_word(sax, author_id, offset = 0)\n Word.new.tap do |word|\n word.id = offset + sax.id[/\\d+/].to_i\n word.author_id = author_id\n word.word = sax.word\n word.grammar = sax.grammar\n word.accents = sax.accents\n word.uris = sax.uris\n end\nend", "def <<(word)\n @words << word\n end", "def <<(word)\n @words << word\n end", "def create\n if params[:format]==nil\n user=current_user\n else\n user= current_user\n deny_access if user.nil?\n end\n if params[:word].nil?\n return\n end\n userid = params[:user_id]\n single_word = params[:word][:word].strip()\n old_word = 
Word.find_by_word_and_user_id(single_word, userid)\n if old_word==nil\n @word = Word.create({:word=>single_word, :user_id => userid})\n @word.translate!\n [email protected]\n @word.add_tag_by_name('unfamiliar')\n else\n @word = old_word\n [email protected]_attribute(:updated_at, Time.now)\n @word.save\n end\n\n if operation_success\n respond_to do |format|\n format.json { render :json => @word, :status => :created }\n format.html { redirect_to(user_path(@word.user).to_s) }\n end\n end\n end", "def add_word(word)\n node = @root\n word.each_char do |c|\n node.children[c] ||= TrieNode.new\n node = node.children[c]\n end\n node.is_end = true\n end", "def add_word(word)\n if word.length == 0\n @isend = true\n else\n @childs[word[0]] ||= WordDictionary.new\n @childs[word[0]].add_word(word[1..-1])\n end\n nil\n end", "def add(word)\n @words[@words.size] = word\n end", "def addword(word)\n if @graph.words[word].nil? then\n @graph.words[word] = {}\n end\n end", "def <<(word)\n @words << word\n end", "def add_tag_to_word(word, *args)\n http_method = :post\n path = '/word/{word}/tag'\n path.sub!('{word}', word.to_s)\n\n # Ruby turns all key-value arguments at the end into a single hash\n # e.g. Wordnik.word.get_examples('dingo', :limit => 10, :part_of_speech => 'verb')\n # becomes {:limit => 10, :part_of_speech => 'verb'}\n last_arg = args.pop if args.last.is_a?(Hash)\n last_arg = args.pop if args.last.is_a?(Array)\n last_arg ||= {}\n\n # Look for a kwarg called :request_only, whose presence indicates\n # that we want the request itself back, not the response body\n if last_arg.is_a?(Hash) && last_arg[:request_only].present?\n request_only = true\n last_arg.delete(:request_only)\n end\n\n params = last_arg\n body ||= {}\n request = Wordnik::Request.new(http_method, path, :params => params, :body => body)\n request_only ? 
request : request.response.body\n end", "def create\n cache.set(word, 1)\n end", "def add_entry_to_buffer(key_words,key_lang,xlated,entry)\n\t\tdict_id=entry['dict_id']\n\t\t##debug(sprintf(\"add_entry id(%s),lang(%s),[%s]\\n\",dict_id,key_lang,key_words))\n\t\t##\n\t\t## parse json-data\n\t\t##\n\t\tbegin\n\t\t\tentry_data=JSON.parse(entry['data'])\n\t\trescue\n\t\t\t##printf(\"Data Error![%s]\\n\",entry['data'])\n\t\t\treturn \n\t\tend\n\t\t##debug(sprintf(\"ENTRYDATA [%s]\\n\",entry_data.inspect()))\n\t\tgrammar=entry_data[\"#GRAMMAR\"]\n\t\tgrammar=\"\" if grammar==nil\n\t\tcategory=entry_data[\"#CATEGORY\"]\n\t\tcategory=\"\" if category==nil\n\t\tentry_num=entry_data[\"#ENTRY_NUM\"]\n\t\tentry_num=\"\" if entry_num==nil\n\t\thash_key= dict_id + key_words + key_lang + grammar + category\n\t\t@entries[hash_key]=Hash.new if @entries[hash_key]==nil\n\t\t@entries[hash_key][entry_num]=\n\t\t\t\t{\"dict_id\"=>dict_id,\n\t\t\t\t\"key_words\"=>key_words,\n\t\t\t\t\"grammar\"=>grammar,\n\t\t\t\t\"category\"=>category,\n\t\t\t\t\"key_lang\"=>key_lang,\n\t\t\t\t\"xlated_word\"=>xlated,\n\t\t\t\t\"entry_data\"=>entry_data}\n\tend", "def initialize(word, citation)\n @word = word\n @citation = citation\n DictionaryEntry.all << self\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def set_Word(value)\n set_input(\"Word\", value)\n end", "def create\n @word = Word.new(params[:word])\n\n respond_to do |format|\n if @word.save\n flash[:notice] = 'Word was successfully created.'\n format.html { redirect_to(@word) }\n format.xml { render :xml => @word, :status => :created, :location => @word }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @word.errors, :status => :unprocessable_entity }\n end\n end\n end", "def add(word)\n build(@tree, word)\n end", "def insert(word)\n node = @root\n word.each_char { |c|\n child = node.insert_child(c)\n node = child\n }\n node.is_word = true\n end", "def create\n new_word = Dinosaurus.new\n \n end", "def register(type, words, fillers: [])\n registry[type.to_s] = Dictionnary.new(words, fillers: fillers)\n end", "def save\n\t\tNg2::HashDb.add(@word, serialize)\n\tend", "def insert(word)\n node = @root\n word.each_char do |c|\n node[c] ||= {}\n node = node[c]\n end\n node[END_OF_WORD] = END_OF_WORD\n end", "def <<(term)\n raise \"No document defined\" unless defined? @document\n unless @terms.include? 
term\n @terms[term] = @terms.length\n end\n i = @terms[term]\n @index[@document] ||= 0\n @index[@document] |= 1 << i\n end", "def add(word)\n @root.create_final_path(word.chars.reverse + [Path::DELIMITER])\n\n Word.new(word.chars).to_delimited_paths.each do |path|\n @root.create_final_path(path.letters)\n end\n\n self\n end", "def add(word)\n node = @root\n word.downcase!\n word.each_char do |letter|\n node[letter] ||= Hash.new\n node = node[letter]\n end\n node[:end] = true\n end", "def set_word\n @word = Word.friendly.find(params[:id])\n end", "def add word\n super word.clone\n end", "def add_noun!(word)\n @redis.rpush(\"store:nouns\", Unicode.downcase(word))\n end", "def add_word_view(word, *args)\n http_method = :post\n path = '/word/{word}/wordView'\n path.sub!('{word}', word.to_s)\n\n # Ruby turns all key-value arguments at the end into a single hash\n # e.g. Wordnik.word.get_examples('dingo', :limit => 10, :part_of_speech => 'verb')\n # becomes {:limit => 10, :part_of_speech => 'verb'}\n last_arg = args.pop if args.last.is_a?(Hash)\n last_arg = args.pop if args.last.is_a?(Array)\n last_arg ||= {}\n\n # Look for a kwarg called :request_only, whose presence indicates\n # that we want the request itself back, not the response body\n if last_arg.is_a?(Hash) && last_arg[:request_only].present?\n request_only = true\n last_arg.delete(:request_only)\n end\n\n params = last_arg\n body ||= {}\n request = Wordnik::Request.new(http_method, path, :params => params, :body => body)\n request_only ? request : request.response.body\n end", "def add_generation(word)\n\t\t@generations << word\n\t\t@modifications << nil\n\tend", "def create\n @word = Word.new(params[:word])\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render json: @word, status: :created, location: @word }\n else\n format.html { render action: \"new\" }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n #debug\n logout(\"Creating a new Word instance...\")\n \n @word = Word.new(params[:word])\n\n ################################################\\\n text_id = @word.text_id\n \n # text = Text.find(:id => text_id.to_i)\n text = Text.find(text_id.to_i)\n \n if text != nil\n \n logout(\"text.id => \" + text.id.to_s)\n \n else\n \n logout(\"text == nil\")\n \n end\n # logout(text.)\n \n # text.words << @word\n # logout(text.words << @word)\n # res = text.words << @word\n \n # logout(\"res=\" + res)\n # @word.texts << text #=> \"text.. << ..word\" or \"word.. << ..text\"\n #=> Both generate the same entry in the join table\n #=> So, you only need to do either of the two.\n ################################################/\n\n logout(\"Saving a new Word instance...\")\n\n respond_to do |format|\n if @word.save\n #debug\n text.words << @word\n \n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render json: @word, status: :created, location: @word }\n else\n format.html { render action: \"new\" }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @word = Word.new(params[:word])\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' 
}\n format.json { render json: @word, status: :created, location: @word }\n else\n format.html { render action: \"new\" }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @word = Word.new(params[:word])\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render json: @word, status: :created, location: @word }\n else\n format.html { render action: \"new\" }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def insert(word)\n node = self\n\n word.each_char do |char|\n\n unless node.children.has_key?(char)\n node.children[char] = WordTree.new\n end\n\n node = node.children[char]\n end\n\n node.instance_variable_set(:@word, word)\n\n self\n end", "def <<(obj)\n @words << obj\n end", "def add_letter(letter, locations)\n # for each occurrence of a letter, add the letter to the correct location in $build-word\n locations.each { |location| $build_word[location] = letter }\n word_test() # then run word_test()\nend", "def set_word\n @word = Word.find_by_text(params[:text])\n end", "def add_ata!(word)\n @redis.rpush(\"store:atas\", Unicode.downcase(word))\n end", "def add(entry)\n @entries[entry.tag] = entry\n end", "def add_word_pair(start, stop, name)\n @attribute_manager.add_word_pair(start, stop, name)\n end", "def add_word_pair(start, stop, name)\n @attribute_manager.add_word_pair(start, stop, name)\n end", "def add_word_pair(start, stop, name)\n @am.add_word_pair(start, stop, name)\n end", "def add(word)\n \t\tif word.length > 0 # there was a zero length char after this, idk\n if @children[word[0,1]] # if current letter exists in hash, add to it\n\t @children[word[0,1]].add(word[1, word.length])\n\t\t\t\t@children[word[0,1]].word = true if (word.length == 1)\n\t else # if the letter doesn't exist, create it\n\t @children[word[0,1]] = LetterTree.new(word[1, word.length])\n\t\t\t\t@children[word[0,1]].word = true if (word.length == 1)\n\t end\n\t\tend\n\tend", "def add_entry(name)\n Library.create(name: name)\n end", "def augment_from_dict(dict)\n path = DICTS[dict.to_sym] || raise(InvalidDictionaryName, dict)\n @word_file = File.open path\n puts \"#{self.class} will augment AppleSpell with words from #{path}\"\n augment!\n end", "def do_add_entry(entry,html_txt,infos)\n\t\t##debug(sprintf(\"ADD DICT ENTRY(%s)\\nINF(%s)\\n\",entry.inspect(),infos.inspect()))\n\n\t\t\n\t\tkey_words= entry['key_words']\n\t\tinfos[:key_words]=key_words\n\t\tinfos[:key_lang]=entry['key_lang']\n\n\n\t\tattr = \"\"\n\t\t[\"grammar\",\"category\"].each{|tag|\n\t\t\tattr << \"/\" << entry[tag] if entry[tag] != \"\"\n\t\t}\n\t\tattr << \"/\" if attr != \"\"\n\t\tprimary_lang=primary_lang(entry['dict_id'])\n\t\tif primary_lang==\"\"\n\t\t\tprimary_lang=entry['key_lang']\n\t\tend\n\t\tkey_term=\"\"\n\t\tif primary_lang != \"\" and infos[:xlated_word] != nil and infos[:xlated_word][primary_lang]!=nil\n\t\t\tinfos[:xlated_word][primary_lang].each{|w|\n\t\t\t\tkey_term << \",\" if key_term != \"\"\n\t\t\t\tkey_term << w \n\t\t\t}\n\t\tend\n\n\t\tinfos[:key_attr]=attr\n\t\tinfos[:attributes]=attr\n\t\tkey_txt = '<p class=\"dict_key_1\">'\n\t\tkey_txt << \"<b>\"\n\t\tif key_term==\"\"\n\t\t\tif key_words.index(\"$phrase$\")!= nil\n\t\t\t\tkey_txt << @to_search\n\t\t\telse\n\t\t\t\tkey_txt << key_words\n\t\t\tend\n\t\telse\n\t\t\tkey_txt << key_term\n\t\tend\n\t\tkey_txt << \"</b>\"\n\t\tif attr != \"\"\n\t\t\tkey_txt << ' <i>' + attr + 
'</i>' \n\t\tend\n\t\tkey_txt << '</p>'\n\n\t\tinfos[:dict_entry_key]= key_txt\n\t\tadd_entry(entry['dict_id'],\n\t\t\t key_words,\n\t\t\t [html_txt],\n\t\t\t infos)\n\t\t##debug(sprintf(\"INFOS-FINAL\\n %s\\n\",infos.inspect()))\n\tend", "def create\n @word = Word.new(word_params)\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word }\n format.json { render :show, status: :created, location: @word }\n else\n format.html { render :new }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @word = Word.new(word_params)\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render :show, status: :created, location: @word }\n else\n format.html { render :new }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @word = Word.new(word_params)\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render :show, status: :created, location: @word }\n else\n format.html { render :new }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @word = Word.new(word_params)\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render :show, status: :created, location: @word }\n else\n format.html { render :new }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_word_default(w)\n add_word(w, \"#{w}r\", \"#{w}p\", \"#{w}m\")\n \n end", "def index_word(word)\n wordArray = word.split('')\n add_child(@root, wordArray, word)\n end", "def create\n @reg_word = RegWord.new(reg_word_params)\n\n respond_to do |format|\n if @reg_word.save\n format.html { redirect_to @reg_word, notice: 'Reg word was successfully created.' }\n format.json { render :show, status: :created, location: @reg_word }\n else\n format.html { render :new }\n format.json { render json: @reg_word.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n params = fill_optional_fields(word_params)\n @word = Word.new(params.merge({ language_id: @current_language, user_id: current_user.id }))\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to action: 'index', notice: 'Word was successfully created.' }\n format.json { render :index, status: :created, location: @word }\n else\n format.html { render :new }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_word(word)\n word_hash = Digest::SHA1.hexdigest(word)\n word_file_path = ROOT_DATA_FOLDER + word_hash\n word_file = File.open(word_file_path, 'a+')\n words = word_file.readlines\n words.each {|word| word.sub! \"\\n\", ''} # remove trailing \\n\n word_index = words.index(word)\n\n if word_index.nil? 
# add new word to end of file with count = 1\n add_line_to_file(word_file_path, word)\n add_line_to_file(word_file_path, '1')\n else # add count to existing word by replacing count line in file\n word_count = words[word_index + 1].to_i\n add_line_to_file(word_file_path, (word_count + 1).to_s, word_index + 1)\n end\n word_file.close\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_word\n @word = Word.find(params[:id])\n end", "def set_new_word\n @new_word = current_user.new_words.find(params[:id])\n end", "def save_definition(word_id, definition)\n MinWords::DB[:defines].insert definition_text: definition, word_id: word_id\n end", "def add_node(word, definition)\n if @head.nil?\n add_first_node(word, definition)\n else\n new_node = Node.new(word, definition)\n @last.next = new_node\n @last = new_node\n end\n puts \"Added node with word: #{word} and defintion #{definition}\"\n end", "def add(word, nextWord)\r\n @words[word] = Hash.new(0) if !@words[word]\r\n @words[word][nextWord] += 1\r\n end", "def create\n @word = Word.new(word_params)\n\n respond_to do |format|\n if @word.save\n format.html { redirect_to @word, notice: 'Word was successfully created.' }\n format.json { render action: 'show', status: :created, location: @word }\n else\n format.html { render action: 'new' }\n format.json { render json: @word.errors, status: :unprocessable_entity }\n end\n end\n end" ]
[ "0.69925016", "0.6955626", "0.6931956", "0.6886098", "0.6740981", "0.65768975", "0.657635", "0.63593745", "0.6315144", "0.62976617", "0.6296913", "0.6245495", "0.62302846", "0.62298506", "0.62181634", "0.6209627", "0.61562204", "0.61460274", "0.61460274", "0.6085285", "0.6081218", "0.6081169", "0.60575134", "0.60503495", "0.60479945", "0.6047007", "0.60423404", "0.6024888", "0.602232", "0.59583867", "0.59583867", "0.59583867", "0.59583867", "0.59583867", "0.59583867", "0.59583867", "0.59583867", "0.59583867", "0.5943373", "0.59336734", "0.59289205", "0.59160495", "0.5884434", "0.5878734", "0.58762336", "0.5869423", "0.5868527", "0.5866857", "0.5843158", "0.5839702", "0.5837213", "0.58322257", "0.5826741", "0.58248603", "0.5824313", "0.5822904", "0.5822904", "0.58127624", "0.5806383", "0.5794632", "0.578934", "0.577622", "0.5770269", "0.576945", "0.576945", "0.5766552", "0.5752825", "0.57493037", "0.5748957", "0.5734815", "0.57260895", "0.5725983", "0.5725983", "0.5725983", "0.5725319", "0.5721405", "0.5715359", "0.571513", "0.5714858", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.5713907", "0.57103366", "0.57069516", "0.5700545", "0.56940454", "0.5684297" ]
0.6188542
16
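The positive document for "Register a new Word entry" writes into three tables whose DDL never appears in this record. A hedged sketch of a schema its prepared statements would accept; the column names and the FTS4 module are assumptions, and only the placeholder counts are implied by the insert statements:

require 'sqlite3'

db = SQLite3::Database.new('dictionary.db')
db.execute_batch <<-SQL
  -- literal/reading text, owning word id, ranking priority (three placeholders)
  CREATE VIRTUAL TABLE literals_fts USING fts4(text, word_id, priority);
  -- joined sense texts, owning word id (two placeholders)
  CREATE VIRTUAL TABLE senses_fts USING fts4(text, word_id);
  -- word id plus the encoded blob (two placeholders)
  CREATE TABLE words (id INTEGER PRIMARY KEY, serialized BLOB);
SQL

Under a schema like this, zeroing word.id before Word.encode avoids storing the id twice: it already lives in the words table's id column.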
Register a new Kanji entry
def insert_kanji(kanji)
  kanji = kanji.clone
  kanji_id = kanji.id

  @index_kanji ||= @database.prepare('insert into kanji_fts values (?, ?)')
  @insert_kanji ||= @database.prepare('insert into kanji values (?, ?)')

  @index_kanji.execute(kanji.character, kanji_id)

  # Both fields are recoverable from the FTS table, so blank them before serializing.
  kanji.id = 0
  kanji.character = ''

  @insert_kanji.execute(kanji_id, Kanji.encode(kanji))
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_kanji\n @kanji = Kanji.find(params[:id])\n end", "def set_kanji\n @kanji = Kanji.find(params[:id])\n end", "def create\n @kanji = Kanji.new(kanji_params)\n\n respond_to do |format|\n if @kanji.save\n format.html { redirect_to @kanji, notice: 'Kanji was successfully created.' }\n format.json { render :show, status: :created, location: @kanji }\n else\n format.html { render :new }\n format.json { render json: @kanji.errors, status: :unprocessable_entity }\n end\n end\n end", "def has_kanji_entry?(d)\n d.has_kind?(Rkanren::KANJI)\n end", "def add_ucla_key\n $mascots.keys[5] = :ucla\nend", "def kanji_params\n params.require(:kanji).permit(:character, :level)\n end", "def register(key)\n return nil if bibliography.nil?\n\n k = key.dup\n k.succ! while bibliography.has_key?(k)\n bibliography.entries[k] = self\n k\n end", "def register(byte, type)\n MAPPINGS[byte] = type\n end", "def shell_registry_createkey(key, view)\n key = normalize_key(key)\n # REG ADD KeyName [/v ValueName | /ve] [/t Type] [/s Separator] [/d Data] [/f]\n shell_registry_cmd_result(\"add /f \\\"#{key}\\\"\", view)\n end", "def create\n respond_with(@i18n_key = Gallifreyian::I18nKey.create(params[:i18n_key]),\n location: i18n_keys_path)\n end", "def registry_createkey(key, view = REGISTRY_VIEW_NATIVE)\n if session_has_registry_ext\n meterpreter_registry_createkey(key, view)\n else\n shell_registry_createkey(key, view)\n end\n end", "def set_kai2_ji7\n @kai2_ji7 = Kai2Ji7.find(params[:id])\n @kai2_ji7.無齊記號 = '!'+@kai2_ji7.無齊記號\n end", "def create\n @kennkoukiroku = Kennkoukiroku.new(kennkoukiroku_params)\n\n respond_to do |format|\n if @kennkoukiroku.save\n format.html { redirect_to @kennkoukiroku, notice: \"健康記録を追加しました\" }\n format.json { render :show, status: :created, location: @kennkoukiroku }\n else\n format.html { render :new }\n format.json { render json: @kennkoukiroku.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @kakiko = Kakiko.new(kakiko_params)#属性を設定してモデルオブジェクトを生成\n @kakiko.user_id = current_user.id#user_id属性を追加\n @kakiko.tag = params[:kakiko][:tag]\n @kakiko.name = params[:kakiko][:name]\n if @kakiko.save#saveしないといくら追加したところで意味がない\n #renderでlocalhost:3000に返す \n render json: @kakiko, status: :created, location: @kakiko\n else\n render json: @kakiko.errors, status: :unprocessable_entity\n end\n end", "def create\n @leccion_kanji = LeccionKanji.new(leccion_kanji_params)\n\n respond_to do |format|\n if @leccion_kanji.save\n format.html { redirect_to @leccion_kanji, notice: 'Leccion kanji was successfully created.' }\n format.json { render :show, status: :created, location: @leccion_kanji }\n else\n format.html { render :new }\n format.json { render json: @leccion_kanji.errors, status: :unprocessable_entity }\n end\n end\n end", "def reg(key)\n\tGRAMMAR[key]\n end", "def reg(key)\n\tGRAMMAR[key]\nend", "def add_at!(word)\n new_word = \"#{Unicode.downcase(word)}а\"\n @redis.rpush(\"store:ats\", new_word)\n end", "def register(instance)\n key = build_key(instance)\n key_registry[key] = instance\n end", "def to_kanji(dd)\n d1, d2 = dd.length == 1 ? 
['0', dd[0]-48] : [dd[0]-48, dd[1]-48]\n case d1 when 0 then ''\n when 1 then 'JUU'\n else to_kanji_single(d1.to_i) ;end \\\n + to_kanji_single(d2.to_s)\nend", "def register(key, klass)\n container.register(key, klass)\n end", "def to_katakana(src)\n src\n .gsub(\"わ゙\",\"ヷ\")\n .gsub(\"い゙\",\"ヸ\")\n .gsub(\"え゙\",\"ヹ\")\n .gsub(\"を゙\",\"ヺ\")\n .tr(\"ぁ-ゖゝゞゟ\",\"ァ-ヶヽヾヿ\")\nend", "def kcode() end", "def create\n logger.debug(params[:kokyaku])\n @kokyaku = Kokyaku.new(params[:kokyaku])\n\n # 現在日付から年度を取得する\n nend = get_nend()\n\n # 顧客IDの年度内最大値を取得する\n maxId = Kokyaku.maximum(:kokyakuId, :conditions => [\"\\\"kokyakuId\\\" BETWEEN ? AND ?\", (nend.to_s + \"00000\").to_i, (nend.to_s + \"99999\").to_i])\n if maxId.blank? then\n maxId = 0;\n end\n maxId_s = maxId.to_s\n if maxId_s.size > 5 then\n maxId = maxId_s[(maxId_s.size - 5),maxId_s.size].to_i\n end\n\n # 顧客IDを生成する -> 年度下2桁+5桁の連番\n @kokyaku.kokyakuId = \"#{nend}#{format(\"%05d\",(maxId + 1))}\".to_i\n\n respond_to do |format|\n if @kokyaku.save\n format.html { redirect_to action: \"index\", notice: 'Kokyaku was successfully created.', reload: 'on' }\n format.json { render json: @kokyaku, status: :created, location: @kokyaku }\n else\n format.html { render action: \"new\" }\n format.json { render json: @kokyaku.errors, status: :unprocessable_entity }\n end\n end\n end", "def add locale, key, value\n entry = self[key] ||= Entry.new\n entry[locale] = value\n end", "def add_key_input(name); end", "def index\n @kanjis = Kanji.all\n end", "def create\n @kolegiji = Kolegiji.new(params[:kolegiji])\n\n respond_to do |format|\n if @kolegiji.save\n format.html { redirect_to @kolegiji, notice: 'Kolegiji was successfully created.' }\n format.json { render json: @kolegiji, status: :created, location: @kolegiji }\n else\n format.html { render action: \"new\" }\n format.json { render json: @kolegiji.errors, status: :unprocessable_entity }\n end\n end\n end", "def koreanKey() # Kekekekeke\n\treturn \"\\x63\\xB8\\x2B\\xB4\\xF4\\x61\\x4E\\x2E\\x13\\xF2\\xFE\\xFB\\xBA\\x4C\\x9B\\x7E\"\nend", "def search_kanji(query, limit = 10)\n \n tokens = query.chars.select { |c| c.kanji? }\n results = []\n\n if tokens.present?\n @search_kanji ||= @database.prepare(SEARCH_KANJI_SQL)\n\n rows = @search_kanji.execute(tokens.join(' OR '), limit).to_a\n end\n\n rows.map do |row|\n kanji = Kanji.decode(row['serialized'])\n\n kanji.character = row['character']\n kanji.id = row['id'].to_i\n\n kanji\n end\n end", "def issjis;\tKconv.issjis(self) end", "def karutum_params\n params.require(:karutum).permit(:waka, :waka_kanji)\n end", "def add_key i18n_prj, input, select_from_translations\n v, quote = input.unquote\n\n if select_from_translations\n items = i18n_prj.potential_i18n_keys v\n if items.empty?\n TextMate::UI.tool_tip \"Can not find tranlation key\"\n else\n k = TextMate::UI.request_item \\\n :title => \"Select Translation Key\",\n :prompt => 'Select Translation Key',\n :items => items\n if !k\n TextMate::UI.tool_tip \"Canceled\"\n end\n end\n return input if !k\n else # new translation if needed\n v.gsub! '.', ''\n k = v[0..0].downcase + v[1..-1].underscore\n full_k = \"#{i18n_prj.key_prefix}.#{k}\"\n if i18n_prj.no_translation(full_k)\n new_k = TextMate::UI.request_string \\\n :default => full_k,\n :title => \"New Translation Key\"\n return input if new_k.nil? or new_k.empty?\n if new_k != full_k\n full_k = new_k\n k = new_k\n else\n k = '.' 
+ k\n end\n insert_translation i18n_prj.en_yml_path, full_k, v\n else\n TextMate::UI.tool_tip \"Translation key '#{full_k}' exists\"\n k = '.' + k\n end\n end\n\n # snippet\n file_type = i18n_prj.file_type\n if quote or file_type == 'rb'\n %Q|t(#{k.inspect})|\n else\n case file_type\n when 'slim'; quote ? %Q|t(#{k.inspect})| : %Q|= t(#{k.inspect})|\n when 'haml'; quote ? %Q|t(#{k.inspect})| : %Q|= t(#{k.inspect})|\n else %Q|<%= t #{k.inspect} %>|\n end\n end\nend", "def set_kuaisufenfawenjian\n @kuaisufenfawenjian = Kuaisufenfawenjian.find(params[:id])\n end", "def init_word(kanji, options={})\n word = Word.find_by_kanji(kanji)\n word.update_attributes!(options)\n word\n end", "def put(namespace, key, entry); end", "def insert_kcup(kcup)\n end", "def register(category, name, klass)\n @plugins[category] ||= {}\n @plugins[category][name.downcase] = klass\n end", "def register\n ensure_post\n ensure_application\n ensure_valid_signature\n\n unless params[:source].blank?\n source = Tr8n::TranslationSource.find_or_create(params[:source], application)\n end\n\n phrases = []\n if params[:phrases]\n begin\n phrases = HashWithIndifferentAccess.new({:data => JSON.parse(params[:phrases])})[:data]\n rescue Exception => ex\n raise Tr8n::Exception.new(\"Invalid request. JSON parsing failed: #{ex.message}\")\n end\n elsif params[:label]\n phrases << {:label => params[:label], :description => params[:description]}\n end\n\n keys = []\n phrases.each do |phrase|\n phrase = {:label => phrase} if phrase.is_a?(String)\n next if phrase[:label].strip.blank?\n opts = {:source => source, :locale => (language || Tr8n::Config.default_language).locale, :application => application}\n keys << Tr8n::TranslationKey.find_or_create(phrase[:label], phrase[:description], opts).to_api_hash(:translations => false)\n end\n\n render_response(keys)\n end", "def meterpreter_registry_createkey(key, view)\n begin\n root_key, base_key = session.sys.registry.splitkey(key)\n perms = meterpreter_registry_perms(KEY_WRITE, view)\n open_key = session.sys.registry.create_key(root_key, base_key, perms)\n open_key.close\n return true\n rescue Rex::Post::Meterpreter::RequestError => e\n return nil\n end\n end", "def jmdict\n root = Nokogiri::XML(open(JMDICT_PATH))\t\t\n\n root.xpath('//entry').each do |entry|\n kanjis = entry.xpath('k_ele/keb').map(&:content)\n reading = entry.xpath('r_ele/reb').first.content\n meaning = entry.xpath('sense').first.xpath('gloss').first.content\n pos = entry.xpath('sense').first.xpath('pos').first.children\n\n kanjis.each do |kanji|\n word = Word.find_or_initialize_by_kanji(:kanji => kanji,\n :reading => reading,\n :pos => pos.to_s,\n :meaning => meaning)\n\n characters = word.kanji.split('').map do |char|\n Character.find_or_create_by_glyph(char)\n end\n \n word.characters = characters\n word.save!\n\n puts \"#{kanji} #{reading} (#{pos}) #{meaning}\"\n end\n end\n end", "def create\n @kuaisufenfawenjian = Kuaisufenfawenjian.new(kuaisufenfawenjian_params)\n\n respond_to do |format|\n if @kuaisufenfawenjian.save\n format.html { redirect_to @kuaisufenfawenjian, notice: 'Kuaisufenfawenjian was successfully created.' 
}\n format.json { render :show, status: :created, location: @kuaisufenfawenjian }\n else\n format.html { render :new }\n format.json { render json: @kuaisufenfawenjian.errors, status: :unprocessable_entity }\n end\n end\n end", "def create_bookmark\n print \"Enter a to z or 0-9 for bookmark: \"\n ch = get_char\n if ch =~ /^[0-9a-z]$/\n $bookmarks[ch.sym] = \"#{$subforum}\"\n $modified = true\n else\n perror \"Bookmark must be lower-case character or number.\"\n end\nend", "def kanji_params\n params.fetch(:kanji, {})\n end", "def add(title, &code)\n @codes[title] = code\n end", "def set_key_entry(aliaz, key, certificate_chain, password = nil)\n\n end", "def insert(character, trie)\n found = trie.find do |n|\n n.value == character\n end\n\n add_node(character, trie) unless found\n end", "def create\n @kokuin = Kokuin.new(kokuin_params)\n\n respond_to do |format|\n if @kokuin.save\n format.html { redirect_to admin_kokuin_path(@kokuin), notice: 'Kokuin was successfully created.' }\n format.json { render action: 'show', status: :created, location: @kokuin }\n else\n format.html { render action: 'new' }\n format.json { render json: @kokuin.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_ata!(word)\n @redis.rpush(\"store:atas\", Unicode.downcase(word))\n end", "def register\r\n \r\n end", "def register\n end", "def register(name, type, value, options = nil)\n @entries << Seasar::Validate::Entry.new(name, type, value, options)\n end", "def create\n @symbole = Symbole.new()\n if symbole_params[:symbole_type].present?\n @symbole.build_kanji_attribute if symbole_params[:symbole_type] == \"kanji\"\n @symbole.build_kana_attribute if symbole_params[:symbole_type] == \"kana\"\n end\n @symbole.assign_attributes(symbole_params)\n\n respond_to do |format|\n if @symbole.save\n format.html { redirect_to @symbole, notice: 'Symbole was successfully created.' 
}\n format.json { render :show, status: :created, location: @symbole }\n else\n format.html { render :new }\n format.json { render json: @symbole.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_entry_to_buffer(key_words,key_lang,xlated,entry)\n\t\tdict_id=entry['dict_id']\n\t\t##debug(sprintf(\"add_entry id(%s),lang(%s),[%s]\\n\",dict_id,key_lang,key_words))\n\t\t##\n\t\t## parse json-data\n\t\t##\n\t\tbegin\n\t\t\tentry_data=JSON.parse(entry['data'])\n\t\trescue\n\t\t\t##printf(\"Data Error![%s]\\n\",entry['data'])\n\t\t\treturn \n\t\tend\n\t\t##debug(sprintf(\"ENTRYDATA [%s]\\n\",entry_data.inspect()))\n\t\tgrammar=entry_data[\"#GRAMMAR\"]\n\t\tgrammar=\"\" if grammar==nil\n\t\tcategory=entry_data[\"#CATEGORY\"]\n\t\tcategory=\"\" if category==nil\n\t\tentry_num=entry_data[\"#ENTRY_NUM\"]\n\t\tentry_num=\"\" if entry_num==nil\n\t\thash_key= dict_id + key_words + key_lang + grammar + category\n\t\t@entries[hash_key]=Hash.new if @entries[hash_key]==nil\n\t\t@entries[hash_key][entry_num]=\n\t\t\t\t{\"dict_id\"=>dict_id,\n\t\t\t\t\"key_words\"=>key_words,\n\t\t\t\t\"grammar\"=>grammar,\n\t\t\t\t\"category\"=>category,\n\t\t\t\t\"key_lang\"=>key_lang,\n\t\t\t\t\"xlated_word\"=>xlated,\n\t\t\t\t\"entry_data\"=>entry_data}\n\tend", "def kanji_list(levels = nil)\n return level_items_list(\"kanji\", levels)\n end", "def create\n\n @JounalEntry = JounalEntry.new(JounalEntry_params)\n @JounalEntry.user_id = current_user.id if current_user\n \n if @JounalEntry.save\n redirect_to \"/\", notice: \"登録しました。\"\n else\n @paytypes = PayType.all\n render :new\n end\n \n end", "def set_kuaisujiaobenzhixing\n @kuaisujiaobenzhixing = Kuaisujiaobenzhixing.find(params[:id])\n end", "def create\n @kifu = Kifu.new(params[:kifu])\n\n respond_to do |format|\n if @kifu.save\n format.html { redirect_to @kifu, :notice => 'Kifu was successfully created.' }\n format.json { render :json => @kifu, :status => :created, :location => @kifu }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @kifu.errors, :status => :unprocessable_entity }\n end\n end\n end", "def set_character(row:, column:, character:)\n begin\n # It's a Japanese display, seemingly using Windows-31J\n #\n # Note that \\ displays as a yen sign, ~ displays as a right arrow, DEL displays as a left arrow\n chr = character.encode(\"Windows-31J\")\n @hw.set_character(row, column, chr)\n rescue Encoding::UndefinedConversionError => e\n raise OdroidLCD::LCDError, \"Could not display character: #{character}, the display uses Japanese Windows-31J\"\n end\n end", "def registry_entry( registry, entry_name )\n r = @registry[ registry ]\n r[ entry_name ] if r\n end", "def create\n @haiku = Haiku.new(params[:haiku])\n\n respond_to { |format|\n if @haiku.save\n\tflash[:notice] = 'The haiku was created, honey.'\n\n\t# Return to index. The first haiku listed will be the new one.\n\tformat.html {\n\t redirect_to haikus_url\n\t}\n\tformat.xml {\n\t head :created, :location => haiku_url(@haiku)\n\t}\n else\n\tformat.html {\n\t render :action => 'new'\n\t}\n\tformat.xml {\n\t render :xml => @haiku.errors.to_xml\n\t}\n end\n }\n end", "def fill_alphabet_from_kanji_name(member)\n # Last name\n if not member.last_name_alphabet.present? ||\n ( (member.last_name_alphabet.present?) && (not member.last_name_alphabet.ascii_only?) ) # If not alphabet\n member.update(last_name_alphabet: kanji_to_romaji_jlp(member.last_name).capitalize )\n end\n # First name\n if not member.first_name_alphabet.present? 
||\n ( (member.first_name_alphabet.present?) && (not member.first_name_alphabet.ascii_only?) ) # If not alphabet\n member.update(first_name_alphabet: kanji_to_romaji_jlp(member.first_name).capitalize )\n end\n end", "def create\n @kuaisujiaobenzhixing = Kuaisujiaobenzhixing.new(kuaisujiaobenzhixing_params)\n\n respond_to do |format|\n if @kuaisujiaobenzhixing.save\n format.html { redirect_to @kuaisujiaobenzhixing, notice: 'Kuaisujiaobenzhixing was successfully created.' }\n format.json { render :show, status: :created, location: @kuaisujiaobenzhixing }\n else\n format.html { render :new }\n format.json { render json: @kuaisujiaobenzhixing.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @keiyaku = Keiyaku.new(keiyaku_params)\n\n respond_to do |format|\n if @keiyaku.save\n format.html { redirect_to @keiyaku, notice: 'Keiyaku was successfully created.' }\n format.json { render :show, status: :created, location: @keiyaku }\n else\n format.html { render :new }\n format.json { render json: @keiyaku.errors, status: :unprocessable_entity }\n end\n end\n end", "def register\n \n end", "def create\n # @katbib2 = Katbib2.new(katbib2_params)\n @katbib2 = Katbib2.find_or_create_by(name: katbib2_params[:name] )\n respond_to do |format|\n if @katbib2.save\n format.html {redirect_to :action => 'index', notice: 'Категория успешно создана.' }\n format.json { render :index, status: :created }\n else\n format.html { render :new }\n format.json { render json: @katbib2.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @translation_key = Multilang::TranslationKey.new translation_key_params\n @translation_key.tap(&:save).create_translations\n respond_to do |format|\n format.html { redirect_to translations_path }\n format.js\n end\n end", "def register(name, type)\n registry[name] = type\n end", "def pinyin char_code\n # look up in recent_table\n if char_code < 1000\n char_code = POLYGLOT_TMP[:RECENT][char_code-1][:char_code].to_i\n end\n\n uri = URI(\"http://www.hanviet.org/hv_timchu.php?unichar=#{char_code}\")\n res = Net::HTTP.get uri\n regex = /javascript:mandarin\\('([\\w]+)'\\)/ \n match = regex.match res\n return puts \"Can't find pinyin for char #{char_code}\" if match.nil?\n uchar = [char_code].pack \"U*\"\n puts \"#{uchar} #{match.to_a.last}\"\n end", "def set_chinese_phrase\n @chinese_phrase = ChinesePhrase.find(params[:id])\n end", "def set_leccion_kanji\n @leccion_kanji = LeccionKanji.find(params[:id])\n end", "def kana?\n codepoint.kana?\n end", "def create\n ActiveRecord::Base.transaction do\n #レコード登録数が最大数を超える場合、一番出勤時間が古く、idが一番若いレコードを削除する。\n @kintais.reorder(nil).order(\"t_syukkin ASC,id ASC\").first.destroy if @kintais.count >= G_MAX_USER_KINTAIS\n Kintai.new(:user_id => current_user.id,:t_syukkin => Time.now).save!\n current_user.update_attributes!(:f_state => !current_user.f_state ) \n end\n \n flash[:notice] = \"おはようございます。正常に記録されました。\"\n respond_with @kintai,:location => kintais_url\n end", "def create\n @english_entry = EnglishEntry.new(params[:english_entry])\n\n respond_to do |format|\n if @english_entry.save\n format.html { redirect_to @english_entry, notice: 'English entry was successfully created.' 
}\n format.json { render json: @english_entry, status: :created, location: @english_entry }\n else\n format.html { render action: \"new\" }\n format.json { render json: @english_entry.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_jiankong\n @jiankong = Jiankong.find(params[:id])\n end", "def register_project(project, dictionary, version, locale, syn_list)\n select_project.select project\n edit_project.click\n add_icon.click\n select_dictionary_name.click\n select_from_dropdown(:dictionary_name, dictionary)\n select_dictionary_version.click\n select_from_dropdown(:version, version)\n select_locale.click\n select_from_dropdown(:locale, locale)\n select_syn_list.click\n select_from_dropdown(:synonym_list, syn_list)\n save_dictionary.click\n sleep 10\n save_and_send_to_source.click\n send_ok.click\n end", "def register\n end", "def register\n end", "def register\n end", "def tojis; Kconv.tojis(self) end", "def register_labels\n label.register if label?\n end", "def add_key(key, value, school)\t\t\t\t\t\t\t\t\t\t\t\t\t#di. create method to add keys & values\n\tschool[key] = value\nend", "def register_with_pkyp\n send_to_pkyp(@pub.to_s)\n end", "def create\n @key_indicate_map_indicator_key = KeyIndicateMap::IndicatorKey.new\n @key_indicate_map_indicator_key.save\n\n respond_to do |format|\n format.js\n format.json { head :no_content }\n end\n end", "def trans_kana!(vstring)\n vstring.tr!('ア-ン', 'ア-ン')\n vstring.tr!('ア-ンヴヵヶ', 'あ-んぶかが')\n end", "def writers_hash\n search_by_text_hash 'сценарий'\n end", "def registry_key=(value)\n @registry_key = value\n end", "def create\n @xinyongtongji = Xinyongtongji.new(params[:xinyongtongji])\n\n respond_to do |format|\n if @xinyongtongji.save\n format.html { redirect_to @xinyongtongji, notice: 'Xinyongtongji was successfully created.' }\n format.json { render json: @xinyongtongji, status: :created, location: @xinyongtongji }\n else\n format.html { render action: \"new\" }\n format.json { render json: @xinyongtongji.errors, status: :unprocessable_entity }\n end\n end\n end", "def ini_write_entry(filename,stanza,entry,value)\n require 'inifile'\n if not ::File.exist? filename\n ::File.open(filename,'w').close\n end\n f = IniFile.load(filename, :comment => '#')\n f[stanza][entry]=value\n f.write\nend", "def create\n @lookup_letterlabel = LookupLetterlabel.new(params[:lookup_letterlabel])\n\n respond_to do |format|\n if @lookup_letterlabel.save\n format.html { redirect_to(@lookup_letterlabel, :notice => 'Lookup letterlabel was successfully created.') }\n format.xml { render :xml => @lookup_letterlabel, :status => :created, :location => @lookup_letterlabel }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @lookup_letterlabel.errors, :status => :unprocessable_entity }\n end\n end\n end", "def register_alias(type, shortcut); end", "def show\n @kanji = Kanji.find(params[:id])\n @kanji_obj = KLookup::Lookup::Kanji.new(@kanji.kanji)\n @radicals = kanji_radicals(@kanji_obj)\n @examples = @kanji.words\n end", "def register_for(id); end", "def create\n @keihi = Keihi.new(keihi_params)\n\n respond_to do |format|\n if @keihi.save\n format.html { redirect_to @keihi, notice: 'Keihi was successfully created.' 
}\n format.json { render :show, status: :created, location: @keihi }\n else\n format.html { render :new }\n format.json { render json: @keihi.errors, status: :unprocessable_entity }\n end\n end\n end", "def add(key_file); end", "def add(name)\n self.class.add(name, @jira_key)\n end", "def test_sdbm\n key = 'あいうえおかきくけこ'\n val = 'たちつてとなにぬねの'\n @tag.tag_db[key] = val\n assert_equal val, @tag.tag_db[key].force_encoding(Encoding::UTF_8)\n end", "def add_entry(level, keywords)\n ENTRIES << [\n level.join(SYMBOL),\n map_keywords(keywords)\n ]\nend", "def create\n @kai2_ji7 = Kai2Ji7.new(kai2_ji7_params)\n\n respond_to do |format|\n if @kai2_ji7.save\n format.html { redirect_to @kai2_ji7, notice: 'Kai2 ji7 was successfully created.' }\n format.json { render action: 'show', status: :created, location: @kai2_ji7 }\n else\n format.html { render action: 'new' }\n format.json { render json: @kai2_ji7.errors, status: :unprocessable_entity }\n end\n end\n end", "def yakushokumaster_params\n params.require(:yakushokumaster).permit(:役職コード, :役職名)\n end" ]
[ "0.6294566", "0.6294566", "0.59592146", "0.59492", "0.55789036", "0.5529804", "0.54679143", "0.5467237", "0.54045975", "0.53881544", "0.5365606", "0.5327453", "0.53089637", "0.5302342", "0.5283817", "0.52235025", "0.5191792", "0.5166446", "0.51229167", "0.5102627", "0.5071384", "0.5066665", "0.50632274", "0.50218135", "0.50195944", "0.4990738", "0.49808773", "0.4970587", "0.4966411", "0.4952181", "0.49431458", "0.49290517", "0.49262616", "0.49218735", "0.4915756", "0.49120373", "0.48988396", "0.4874219", "0.4867618", "0.4866767", "0.4855292", "0.4851502", "0.4845683", "0.48421413", "0.4821498", "0.4820632", "0.48201805", "0.48081216", "0.4804593", "0.4800728", "0.47953176", "0.47947156", "0.4791542", "0.47815317", "0.47779188", "0.47693467", "0.4768454", "0.47662", "0.47648394", "0.47601253", "0.47501597", "0.4750113", "0.4747571", "0.47377014", "0.4733666", "0.47323534", "0.47268116", "0.47173798", "0.4713439", "0.47115034", "0.4708281", "0.47044984", "0.4703945", "0.46952876", "0.46907157", "0.46890712", "0.46867532", "0.46867532", "0.46867532", "0.46832785", "0.46762794", "0.46750203", "0.4674462", "0.46734348", "0.46661332", "0.46644408", "0.46633798", "0.4661027", "0.46603945", "0.4658155", "0.46514276", "0.46508846", "0.46495262", "0.46412268", "0.46387675", "0.46387437", "0.46385884", "0.4637568", "0.46367022", "0.46340647" ]
0.6248506
2
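insert_kanji blanks both id and character before encoding because both come back for free from the index at query time: the search_kanji negative in this record restores exactly those fields from the row hash. The record never shows SEARCH_KANJI_SQL itself, so the query below only illustrates the shape such a lookup could take. It is kept consistent with the row keys ('serialized', 'character', 'id') that snippet reads, and it assumes results_as_hash is enabled on the connection:

# Illustrative lookup only; the real SEARCH_KANJI_SQL is not shown in this record.
kanji_lookup_sql = <<-SQL
  SELECT kanji.id AS id, kanji_fts.character AS character, kanji.serialized AS serialized
  FROM kanji_fts
  JOIN kanji ON kanji.id = kanji_fts.id
  WHERE kanji_fts.character MATCH ?
  LIMIT ?
SQL

rows = @database.prepare(kanji_lookup_sql).execute('語', 1)
rows.map do |row|
  k = Kanji.decode(row['serialized'])
  k.character = row['character'] # restored from the index, not the blob
  k.id = row['id'].to_i          # likewise
  k
end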
Search Word entries matching the given query string
def search_words(query)
  query = query.downcase

  if query.contains_japanese?
    words = search_words_by_literals(query, 50)
  else
    words = search_words_by_senses(query, 50)

    if words.size <= 10
      extra_words = []
      extra_words += search_words_by_literals(query.hiragana, 20)
      extra_words += search_words_by_literals(query.katakana, 20)
      extra_words.sort! { |w1, w2| w1.score <=> w2.score }

      words += extra_words
    end
  end

  words
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(query); end", "def search(word)\n Parser.new(query(word)).parse\n end", "def search_words\n begin\n regex = Regexp.new(@pattern)\n rescue RegexpError => msg\n error_msg(msg)\n rescue NameError => msg\n error_msg(msg)\n end\n @results = DICT.select do |word|\n regex =~ word\n end\n @num_results = @results.length\n format_results\n display_results\n end", "def search(word)\n \n end", "def search(word)\r\n \r\n end", "def match_query(query); end", "def search(query)\n @all.each_with_object([]) do |record, matches|\n matches << record if ((record['866']['t'] == query) || (record['866']['s'] == query))\n end\n end", "def search(query, options = {}); end", "def search!(\n query, case_sensitive: false, whole_sentence: true,\n limit: 10, skip: 0, sentence_limit: 80\n )\n results = search(\n query,\n case_sensitive: case_sensitive,\n whole_sentence: whole_sentence,\n limit: limit,\n skip: skip\n )\n\n results.each do |doc|\n doc.search!(\n query,\n case_sensitive: case_sensitive,\n whole_sentence: whole_sentence,\n sentence_limit: sentence_limit\n )\n yield(doc) if block_given?\n end\n\n results\n end", "def search_process\n @search_text =params[:q].to_s\n all =params[:all].to_s\n exact =params[:exact].to_s\n any =params[:any].to_s\n none =params[:none].to_s\n advanced_query=\"\"\n\n if all != \"\"\n all =all.split(' ')\n all_like =all.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\n all_like =all_like.join(' and ')\n advanced_query=all_like\n end\n\n if exact != \"\" && all != \"\"\n exact =\"'%\"+exact+\"%'\"\n advanced_query = advanced_query + \" and keyword like \" + exact\n end\n\n if exact != \"\" && all == \"\"\n exact =\"'%\"+exact+\"%'\"\n advanced_query = \"keyword like \" + exact\n end\n\n if any != \"\" and (all != \"\" or exact != \"\")\n any =any.split(' ')\n any_like =any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\n any_like =any_like.join(' or ')\n advanced_query = advanced_query + \" and (\" + any_like + \")\"\n end\n\n if any != \"\" and all == \"\" and exact == \"\"\n any =any.split(' ')\n any_like =any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\n any_like =any_like.join(' or ')\n advanced_query = \"(\" + any_like + \")\"\n end\n\n if none != \"\" and (all != \"\" or exact != \"\" or any != \"\")\n none =none.split(' ')\n none_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\n\n none_not_like=none_not_like.join(' and ')\n\n advanced_query=advanced_query + \" and \" + none_not_like\n\n end\n\n if none != \"\" and all == \"\" and exact == \"\" and any == \"\"\n none =none.split(' ')\n none_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\n\n none_not_like=none_not_like.join(' and ')\n\n advanced_query= none_not_like\n end\n\n\n advanced_query = \"SELECT Model_ID FROM keyword_symbol_tables WHERE \"+advanced_query\n\n parameter_search_text=@search_text.split.join(\" \")\n keyword_array =parameter_search_text.split(' ')\n keyword_count =keyword_array.size\n\n connection = ActiveRecord::Base.connection\n\n if all != \"\" or exact != \"\" or any != \"\" or none != \"\"\n @resultset = connection.execute(\"#{advanced_query}\");\n else\n @resultset = connection.execute(\"call keyword_search('#{parameter_search_text}',#{keyword_count})\");\n end\n\n ActiveRecord::Base.clear_active_connections!\n\n @resultset_strings = @resultset.map { |result| result.to_s.gsub(/[^0-9A-Za-z]/, '') }\n\n @model_ids =Array.new\n @model_names =Array.new\n @model_types =Array.new\n\n @resultset_strings.each do |result|\n\n 
substring=result[0..4]\n\n if substring == \"NMLCL\"\n cell=Cell.find_by_Cell_ID(result.to_s)\n name=cell.Cell_Name\n type=\"Cell\"\n end\n\n if substring == \"NMLCH\"\n channel=Channel.find_by_Channel_ID(result.to_s)\n name =channel.Channel_Name\n type =\"Channel\"\n end\n\n\n if substring == \"NMLNT\"\n network=Network.find_by_Network_ID(result.to_s)\n name =network.Network_Name\n type =\"Network\"\n end\n\n if substring == \"NMLSY\"\n synapse=Synapse.find_by_Synapse_ID(result.to_s)\n name =synapse.Synapse_Name\n type =\"Synapse\"\n end\n\n @model_ids.push(result)\n @model_names.push(name)\n @model_types.push(type)\n\n end\n\n if @model_ids.count != 0\n\n render :partial => 'keyword_results_list',\n :locals => {\n :model_ids => @model_ids,\n :model_names => @model_names,\n :model_types => @model_types\n }\n\n else\n\n render :partial => 'no_results'\n\n end\n\n end", "def parse_search(q)\n # TODO continue\n end", "def search(query, case_type: :smart)\n WWID.new.fuzzy_filter_items(self, query, case_type: case_type)\n end", "def search\n terms = params[:query].split\n query = terms.map { |term| \"title like '%#{term}%' OR body like '%#{term}%' OR tags like '%#{term}%'\" }.join(\" OR \")\n \n @posts = Post.where(query).order(\"created_at DESC\").first(10)\n end", "def findWord(query, array_of_strings)\n\t#array_of_strings.select {|str| str.match(query) }\n #array_of_strings.any? {|i| i[query] }\n array_of_strings.reject {|x| x.match (/#{query}/) }\nend", "def do_search\n @search_text = params[:q]\n\n # Doctoring for the view to find matches:\n @q = @search_text\n @q.chop! if params[:q] =~ /\\*$/\n @q = @q[1..-1] if params[:q] =~ /^\\*/\n\n # TODO: we'll want some whitelist filtering here later:\n # params[:q] = \"#{@q}*\" unless params[:q] =~ /\\*$/ or params[:q] =~ /^[-+]/ or params[:q] =~ /\\s/\n params[:q] = I18n.transliterate(params[:q]).downcase\n\n # TODO: This search suggestions block is large; extract.\n\n # First step (and, yes, this will be slow—we will optimize later), look for\n # search suggestions that match the query:\n words = params[:q].split # TODO: we might want to remove words with ^-\n # TODO: we might also want to remove stopwords e.g.: https://github.com/brenes/stopwords-filter\n suggestions = []\n # YUCK! This is the best way to do this in Searchkick at the moment, though.\n # :S\n words.each do |word|\n word_search = SearchSuggestion.search(word, fields: [{ match: :exact }])\n suggestions += word_search.results if word_search.respond_to?(:results)\n end\n\n # If we only found one thing and they only asked for one thing:\n if suggestions.size == 1 && params[:q] !~ /\\s/\n Rails.logger.warn(\"One suggestion.\")\n # TODO: move this to a helper? It can't go on the model...\n suggestion = suggestions.first\n suggestion = suggestion.synonym_of if suggestion.synonym_of\n where = case suggestion.type\n when :page\n suggestion.page\n when :object_term\n term_records_path(uri: suggestion.object_term, object: true)\n when :path\n suggestion.path\n when :wkt_string\n flash[:notice] = \"Unimplemented, sorry.\"\n \"/\"\n end\n return redirect_to(where)\n elsif suggestions.size >= 2 && params[:q] =~ /\\s/\n Rails.logger.warn(\"Multiple suggestions.\")\n groups = suggestions.group_by(&:type)\n # Easier to handle:\n groups[:page] ||= []\n groups[:object_term] ||= []\n groups[:path] ||= []\n groups[:wkt_string] ||= []\n if groups[:page].size > 1\n Rails.logger.warn(\"Multiple PAGE suggestions.\")\n # We can't use suggestions if there's more than one species. 
Sorry.\n flash[:notice] = t(\"search.flash.more_than_one_species\",\n species: groups[:page].map(&:match).to_sentence)\n else\n Rails.logger.warn(\"0 or 1 Page suggestions.\")\n clade = groups[:page].try(:first).try(:page_id)\n Rails.logger.warn(\"Page suggestion: #{clade}\") if clade\n if groups[:object_term].size > 2\n Rails.logger.warn(\"Over two TERM suggestions.\")\n flash[:notice] = t(\"search.flash.more_than_two_terms\",\n terms: groups[:object_term].map(&:match).to_sentence)\n elsif groups[:path].size > 0\n Rails.logger.warn(\"...had PATH suggestions.\")\n flash[:notice] = t(\"search.flash.cannot_combine_paths\",\n path: groups[:path].map(&:match).to_sentence)\n else # NOTE: this assumes we only have OBJECT term suggestions, not predicates.\n Rails.logger.warn(\"Usable suggestions...\")\n (first, second) = groups[:object_term] # Arbitrary which is first...\n Rails.logger.warn(\"First term: #{first.object_term}\")\n Rails.logger.warn(\"Second term: #{second.object_term}\") if second\n return redirect_to(term_records_path(uri: first.object_term, object: true,\n and_object: second.try(:object_term), clade: clade))\n end\n end\n end\n\n @clade = if params[:clade]\n puts \"*\" * 100\n puts \"** Filtering by clade #{params[:clade]}\"\n # It doesn't make sense to filter some things by clade:\n params[:only] = if params[:only]\n Array(params[:only]) - [:collections, :users, :predicates, :object_terms]\n else\n [:pages, :media]\n end\n puts \"Only param should now be: #{params[:only]}\"\n Page.find(params[:clade])\n else\n nil\n end\n\n default = params.has_key?(:only)? false : true\n @types = {}\n [ :pages, :collections, :articles, :images, :videos, :videos, :sounds, :links, :users, :predicates, :object_terms ].\n each do |sym|\n @types[sym] = default\n end\n\n @types[params[:only].to_sym] = true if params.has_key?(:only)\n\n # if params.has_key?(:only)\n # Array(params[:only]).each { |type| @types[type.to_sym] = true }\n # elsif params.has_key?(:except)\n # Array(params[:except]).each { |type| @types[type.to_sym] = false }\n # end\n\n # NOTE: no search is performed unless the @types hash indicates a search for\n # that class is required:\n\n @pages = if @types[:pages]\n fields = %w[preferred_vernacular_strings^20 vernacular_strings^20 preferred_scientific_names^10 scientific_name^10 synonyms^10 providers resource_pks]\n match = words.size == 1 ? :text_start : :phrase\n basic_search(Page, boost_by: [:page_richness, :specificity, :depth], match: match, fields: fields,\n where: @clade ? { ancestry_ids: @clade.id } : nil,\n includes: [:preferred_vernaculars, :medium, { native_node: { node_ancestors: :ancestor } }])\n else\n nil\n end\n\n\n @collections = if @types[:collections]\n basic_search(Collection, fields: [\"name^5\", \"description\"])\n else\n nil\n end\n\n @articles = if @types[:articles]\n basic_search(Searchkick,\n fields: [\"name^5\", \"resource_pk^10\", \"owner\", \"description^2\"],\n where: @clade ? { ancestry_ids: @clade.id } : nil,\n index_name: [Article])\n else\n nil\n end\n\n @images = if @types[:images]\n media_search(\"image\")\n else\n nil\n end\n\n @videos = if @types[:videos]\n media_search(\"video\")\n else\n nil\n end\n\n @sounds = if @types[:sounds]\n media_search(\"sound\")\n else\n nil\n end\n\n # @links = if @types[:links]\n # basic_search(Searchkick,\n # fields: [\"name^5\", \"resource_pk^10\", \"owner\", \"description^2\"],\n # where: @clade ? 
{ ancestry_ids: @clade.id } : nil,\n # index_name: [Link])\n # else\n # nil\n # end\n\n @users = if @types[:users]\n basic_search(User, fields: [\"username^6\", \"name^4\", \"tag_line\", \"bio^2\"])\n else\n nil\n end\n\n Searchkick.multi_search([@pages, @articles, @images, @videos, @sounds, @collections, @users].compact)\n\n @pages = PageSearchDecorator.decorate_collection(@pages) if @pages\n @articles = ArticleSearchDecorator.decorate_collection(@articles) if @articles\n @images = ImageSearchDecorator.decorate_collection(@images) if @images\n @videos = VideoSearchDecorator.decorate_collection(@videos) if @videos\n @sounds = SoundSearchDecorator.decorate_collection(@sounds) if @sounds\n @collections = CollectionSearchDecorator.decorate_collection(@collections) if @collections\n @users = UserSearchDecorator.decorate_collection(@users) if @users\n\n # if @types[:predicates]\n # @predicates_count = TraitBank.count_predicate_terms(@q)\n # # NOTE we use @q here because it has no wildcard.\n # @predicates = Kaminari.paginate_array(\n # TraitBank.search_predicate_terms(@q, params[:page], params[:per_page]),\n # total_count: @predicates_count\n # ).page(params[:page]).per(params[:per_page] || 50)\n # end\n #\n # if @types[:object_terms]\n # @object_terms_count = TraitBank.count_object_terms(@q)\n # # NOTE we use @q here because it has no wildcard.\n # @object_terms = Kaminari.paginate_array(\n # TraitBank.search_object_terms(@q, params[:page], params[:per_page]),\n # total_count: @object_terms_count\n # ).page(params[:page]).per(params[:per_page] || 50)\n # end\n\n respond_to do |fmt|\n fmt.html do\n @page_title = t(:page_title_search, query: @q)\n end\n\n fmt.js { }\n\n # TODO: JSON results for other types! TODO: move; this is view logic...\n fmt.json do\n render json: JSON.pretty_generate(@pages.results.as_json(\n except: :native_node_id,\n methods: :scientific_name,\n include: {\n preferred_vernaculars: { only: [:string],\n include: { language: { only: :code } } },\n # NOTE I'm excluding a lot more for search than you would want for\n # the basic page json:\n top_media: { only: [ :id, :guid, :owner, :name ],\n methods: [:small_icon_url, :medium_icon_url],\n include: { provider: { only: [:id, :name] },\n license: { only: [:id, :name, :icon_url] } } }\n }\n ))\n end\n end\n end", "def search\n\t\terror_message = nil\n\t\tresults = []\n\t\tloop do\n\t\t\t# system \"clear\"\n\t\t\tputs error_message || \"Please enter how you'd like to search.\".green\n\t\t\tputs \"1) Exact Match\\n2) Partial Match\\n3) Begins With...\\n4) Ends With...\\n\"\n\t\t\tprint \">\".blink.cyan\n\t\t\tinput = gets.chomp.to_i\n\t\t\tif input.is_a?(Fixnum) && input >= 1 && input <= 4\n\t\t\t\tresults = @dictionary_analyzer.search(input, @dictionary)\n\t\t\t\tbreak\n\t\t\telse\n\t\t\t\terror_message = \"Sorry, invalid input. Please choose 1,2,3 or 4.\"\n\t\t\t\tredo\n\t\t\tend\t\n\t\tend\n\n\t\t# Now that we have the results let's do something\n\t\t# with them. 
Unless there aren't any.\n\t\tif results.count == 0\n\t\t\tputs \"Sorry, no words were found that match that string.\"\n\t\telse\n\t\t\tfound_match(results)\n\t\tend\n\tend", "def search_text(query, text)\n text = pattern(text)\n query.where { title.ilike(text) | description.ilike(text) }\n end", "def search phrase, callback = nil\n \n and_phrases, or_phrases, not_phrases, two_word_phrases = get_and_or_not_exact_of phrase\n \n res = nil\n \n two_word_phrases.each do |phrase|\n res = List::chain res, @handlers['two_word'].search(phrase)\n end\n \n if not and_phrases.empty?\n res = List::chain res, @handlers['simple'].search_and(and_phrases)\n end\n \n or_phrases.each do |or_p|\n res = List::chain res, @handlers['simple'].search_or(or_p)\n end\n \n except = @handlers['simple'].search_and not_phrases\n \n final_result = []\n # finally filter pages not to show and return\n # real document names\n prefinal_ = List::NOT(res, except)\n if prefinal_\n prefinal_.each { |x| final_result |= [@manager.get_by_id(x)] }\n else\n puts 'warning: empty prefinal result'\n end\n \n # callback may be used for postfiltration etc.\n if callback\n final_result = callback final_result\n end\n \n return final_result\n end", "def find_by_word(word, root, root_namespace, options = nil)\n xquery = <<-GENERATED\n import module namespace search = \"http://marklogic.com/appservices/search\" at \"/MarkLogic/appservices/search/search.xqy\";\n search:search(\"#{word}\",\n GENERATED\n search_options = setup_options(options, root, root_namespace)\n xquery << search_options.to_s\n xquery << ')'\n end", "def search(dbpath, querystring, offset: 0, pagesize: 10)\n # offset - defines starting point within result set\n # pagesize - defines number of records to retrieve\n\n # Open the database we're going to search.\n db = Xapian::Database.new(dbpath)\n\n # Set up a QueryParser with a stemmer and suitable prefixes\n queryparser = Xapian::QueryParser.new\n queryparser.stemmer = Xapian::Stem.new('en')\n queryparser.stemming_strategy = Xapian::QueryParser::STEM_SOME\n queryparser.add_prefix('title', 'S')\n queryparser.add_prefix('description', 'XD')\n # and add in range processors\n queryparser.add_rangeprocessor(PopulationRangeProcessor.new(3, 500_000, 50_000_000))\n # Start of date example code\n queryparser.add_rangeprocessor(Xapian::DateRangeProcessor.new(2, Xapian::RP_DATE_PREFER_MDY, 1860))\n queryparser.add_rangeprocessor(Xapian::NumberRangeProcessor.new(1))\n # End of date example code\n # And parse the query\n query = queryparser.parse_query(querystring)\n\n # Use an Enquire object on the database to run the query\n enquire = Xapian::Enquire.new(db)\n enquire.query = query\n\n # And print out something about each match\n matches = []\n enquire.mset(offset, pagesize).matches.each do |match|\n fields = JSON.parse(match.document.data)\n printf \"%<rank>i: #%<docid>3.3i %<name>s %<date>s\\n Population %<pop>s\\n\",\n rank: match.rank + 1,\n docid: match.docid,\n name: fields['name'],\n date: format_date(fields['admitted'].to_s),\n pop: format_numeral(fields['population'].to_i)\n matches << match.docid\n end\n log_matches(querystring, offset, pagesize, matches)\nend", "def search query\n @content = @reader.read if @content.nil?\n @content.select do |doc|\n rs = []\n query.terms.each do |term|\n if term.compare(doc.send(term.field))\n rs << true\n end\n end\n if query.relation == :and\n rs.count == query.terms.count\n else\n !rs.empty?\n end\n end\n end", "def search!(collection, query_str)\n res = collection\n if 
query_str.present?\n\n query_str.split(Regex::FIELD).each do |term|\n next if term.blank?\n\n parts = []\n terms = []\n fields.each do |f|\n field = get_field(f, collection)\n part = [\"#{field} #{Regex::LIKE} \",\n \"#{query_base} ESCAPE '#{Regex::ESCAPECHAR}'\"].join('')\n terms << get_term(term)\n parts << part\n end\n\n if parts.any?\n query = parts.join(\" #{Regex::OR} \")\n res = res.where(query, *terms)\n end\n end\n end\n res\n end", "def search(wd)\n\tpage = Query::Engine::Baidu.query(wd)\n\t#page.seo_rank #it seems that the seo_rank of baidu is not complete crawled the search page\n\trelated_keywords_baidu = page.related_keywords \n\trelated_keywords_baidu.each do |keywords| # save each keywords into database unless the word is exist already.\n\t\tnext unless RelateWorld.find_by_keyword(keywords) == nil\n\t\trelate = RelateWorld.new\n\t\trelate.keyword = keywords\n\t\trelate.save\n\tend \nend", "def fulltextsearch(query, text, words=2)\n result = \"\"\n if text =~ /.*#{query}.*/i\n text_words = text.scan(/\\w*/)\n indexes = []\n text_words.each_with_index do |word,index|\n if word =~ /.*#{query}.*/i\n i = []\n i << index - words unless words == 0 || index - words < 0\n i << index\n i << index + words unless words == 0 || index + words > text_words.length\n indexes << i\n end\n end\n indexes.each do |i|\n result += \"... \" unless i.length == 1\n i.each {|j| result += \"#{text_words[j]} \"}\n result += \" ...\" unless i.length == 1\n end\n end\n result\n end", "def search_process\r\nsearch_text=params[:q].to_s\r\nall=params[:all].to_s\r\nexact=params[:exact].to_s\r\nany=params[:any].to_s\r\nnone=params[:none].to_s\r\nadvanced_query=\"\"\r\n\r\nif all != \"\"\r\nall=all.split(' ')\r\nall_like=all.map {|x| \"keyword like \" + \"'%\" + x + \"%'\" }\r\nall_like=all_like.join(' and ')\r\nadvanced_query=all_like\r\nend\r\n\r\nif exact != \"\" && all != \"\"\r\nexact=\"'%\"+exact+\"%'\"\r\nadvanced_query = advanced_query + \" and keyword like \" + exact\r\nend\r\n\r\nif exact != \"\" && all == \"\"\r\nexact=\"'%\"+exact+\"%'\"\r\nadvanced_query = \"keyword like \" + exact\r\nend\r\n\r\nif any != \"\" and ( all != \"\" or exact != \"\" )\r\nany=any.split(' ')\r\nany_like=any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\r\nany_like=any_like.join(' or ')\r\nadvanced_query = advanced_query + \" and (\" + any_like + \")\"\r\nend\r\n\r\nif any != \"\" and all == \"\" and exact == \"\"\r\nany=any.split(' ')\r\nany_like=any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\r\nany_like=any_like.join(' or ')\r\nadvanced_query = \"(\" + any_like + \")\"\r\nend\r\n\r\nif none != \"\" and (all != \"\" or exact != \"\" or any != \"\")\r\nnone=none.split(' ')\r\nnone_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\r\n\r\nnone_not_like=none_not_like.join(' and ')\r\n\r\nadvanced_query=advanced_query + \" and \" + none_not_like\r\n\r\nend\r\n\r\nif none != \"\" and all == \"\" and exact == \"\" and any == \"\"\r\nnone=none.split(' ')\r\nnone_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\r\n\r\nnone_not_like=none_not_like.join(' and ')\r\n\r\nadvanced_query= none_not_like\r\nend\r\n\r\n\r\n\r\n\r\n\r\nadvanced_query = \"SELECT Model_ID FROM keyword_symbol_tables WHERE \"+advanced_query\r\nputs \"\\n\\n***********************************\\n\\n\"+advanced_query+\"\\n\\n**********************\\n\\n\"\r\n\r\nparameter_search_text=search_text.split.join(\" \")\r\n keyword_array=parameter_search_text.split(' ')\r\n 
keyword_count=keyword_array.size\r\n\r\nconnection = ActiveRecord::Base.connection();\r\nif all != \"\" or exact != \"\" or any != \"\" or none != \"\"\r\n@resultset = connection.execute(\"#{advanced_query}\");\r\nelse\r\n@resultset = connection.execute(\"call keyword_search('#{parameter_search_text}',#{keyword_count})\");\r\nend\r\nActiveRecord::Base.clear_active_connections!()\r\n\r\[email protected] do |res|\r\nputs res\r\nend\r\n@resultset_strings = @resultset.map { |result| result.to_s.gsub(/[^0-9A-Za-z]/, '')}\r\n@model_ids=Array.new\r\n@model_names=Array.new\r\n@model_types=Array.new\r\n@resultset_strings.each do |result|\r\nsubstring=result[0..4]\r\nputs\"\\n\\n************\"+substring\r\nif substring == \"NMLCL\"\r\ncell=Cell.find_by_Cell_ID(result.to_s)\r\nname=cell.Cell_Name\r\ntype=\"Cell\"\r\nend\r\n\r\nif substring == \"NMLCH\"\r\nchannel=Channel.find_by_Channel_ID(result.to_s)\r\nname=channel.Channel_Name\r\ntype=\"Channel\"\r\nend\r\n\r\n\r\nif substring == \"NMLNT\"\r\nnetwork=Network.find_by_Network_ID(result.to_s)\r\nname=network.Network_Name\r\ntype=\"Network\"\r\nend\r\n\r\n#if substring == \"NMLSY\"\r\n#name=Synapse.find_by_Synapse_ID(result.to_s)\r\n#type=\"Syanpse\"\r\n#end\r\n\r\n@model_ids.push(result)\r\n@model_names.push(name)\r\n@model_types.push(type)\r\nputs \"result-\"+result+\"name-\"+name.to_s\r\nend\r\n\r\nif @model_ids.count != 0\r\nrender :partial => 'keyword_results_list',:locals => {:model_ids => @model_ids,:model_names => @model_names,:model_types => @model_types}\r\nelse\r\nrender :partial => 'no_results'\r\nend\r\n\r\n\r\n end", "def search_process\r\nsearch_text=params[:q].to_s\r\nall=params[:all].to_s\r\nexact=params[:exact].to_s\r\nany=params[:any].to_s\r\nnone=params[:none].to_s\r\nadvanced_query=\"\"\r\n\r\nif all != \"\"\r\nall=all.split(' ')\r\nall_like=all.map {|x| \"keyword like \" + \"'%\" + x + \"%'\" }\r\nall_like=all_like.join(' and ')\r\nadvanced_query=all_like\r\nend\r\n\r\nif exact != \"\" && all != \"\"\r\nexact=\"'%\"+exact+\"%'\"\r\nadvanced_query = advanced_query + \" and keyword like \" + exact\r\nend\r\n\r\nif exact != \"\" && all == \"\"\r\nexact=\"'%\"+exact+\"%'\"\r\nadvanced_query = \"keyword like \" + exact\r\nend\r\n\r\nif any != \"\" and ( all != \"\" or exact != \"\" )\r\nany=any.split(' ')\r\nany_like=any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\r\nany_like=any_like.join(' or ')\r\nadvanced_query = advanced_query + \" and (\" + any_like + \")\"\r\nend\r\n\r\nif any != \"\" and all == \"\" and exact == \"\"\r\nany=any.split(' ')\r\nany_like=any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\r\nany_like=any_like.join(' or ')\r\nadvanced_query = \"(\" + any_like + \")\"\r\nend\r\n\r\nif none != \"\" and (all != \"\" or exact != \"\" or any != \"\")\r\nnone=none.split(' ')\r\nnone_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\r\n\r\nnone_not_like=none_not_like.join(' and ')\r\n\r\nadvanced_query=advanced_query + \" and \" + none_not_like\r\n\r\nend\r\n\r\nif none != \"\" and all == \"\" and exact == \"\" and any == \"\"\r\nnone=none.split(' ')\r\nnone_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\r\n\r\nnone_not_like=none_not_like.join(' and ')\r\n\r\nadvanced_query= none_not_like\r\nend\r\n\r\n\r\n\r\n\r\n\r\nadvanced_query = \"SELECT Model_ID FROM keyword_symbol_tables WHERE \"+advanced_query\r\nputs 
\"\\n\\n***********************************\\n\\n\"+advanced_query+\"\\n\\n**********************\\n\\n\"\r\n\r\nparameter_search_text=search_text.split.join(\" \")\r\n keyword_array=parameter_search_text.split(' ')\r\n keyword_count=keyword_array.size\r\n\r\nconnection = ActiveRecord::Base.connection();\r\nif all != \"\" or exact != \"\" or any != \"\" or none != \"\"\r\n@resultset = connection.execute(\"#{advanced_query}\");\r\nelse\r\n@resultset = connection.execute(\"call keyword_search('#{parameter_search_text}',#{keyword_count})\");\r\nend\r\nActiveRecord::Base.clear_active_connections!()\r\n\r\[email protected] do |res|\r\nputs res\r\nend\r\n@resultset_strings = @resultset.map { |result| result.to_s.gsub(/[^0-9A-Za-z]/, '')}\r\n@model_ids=Array.new\r\n@model_names=Array.new\r\n@model_types=Array.new\r\n@resultset_strings.each do |result|\r\nsubstring=result[0..4]\r\nputs\"\\n\\n************\"+substring\r\nif substring == \"NMLCL\"\r\ncell=Cell.find_by_Cell_ID(result.to_s)\r\nname=cell.Cell_Name\r\ntype=\"Cell\"\r\nend\r\n\r\nif substring == \"NMLCH\"\r\nchannel=Channel.find_by_Channel_ID(result.to_s)\r\nname=channel.Channel_Name\r\ntype=\"Channel\"\r\nend\r\n\r\n\r\nif substring == \"NMLNT\"\r\nnetwork=Network.find_by_Network_ID(result.to_s)\r\nname=network.Network_Name\r\ntype=\"Network\"\r\nend\r\n\r\n#if substring == \"NMLSY\"\r\n#name=Synapse.find_by_Synapse_ID(result.to_s)\r\n#type=\"Syanpse\"\r\n#end\r\n\r\n@model_ids.push(result)\r\n@model_names.push(name)\r\n@model_types.push(type)\r\nputs \"result-\"+result+\"name-\"+name.to_s\r\nend\r\n\r\nif @model_ids.count != 0\r\nrender :partial => 'keyword_results_list',:locals => {:model_ids => @model_ids,:model_names => @model_names,:model_types => @model_types}\r\nelse\r\nrender :partial => 'no_results'\r\nend\r\n\r\n\r\n end", "def search\n @documents = api.form(\"everything\")\n .query(%([[:d = fulltext(document, \"#{params[:q]}\")]]))\n .page(params[:page] ? params[:page] : \"1\")\n .page_size(params[:page_size] ? params[:page_size] : \"20\")\n .submit(ref)\n end", "def index_by_keyword\n entries = Entry.all\n @selected_entries = Array.new\n @keyword = params[:keyword]\n entries.each do |entry|\n if /#{@keyword}/.match(entry.content) ||\n /#{@keyword}/.match(entry.tags) ||\n /#{@keyword}/.match(entry.title)\n @selected_entries << entry\n end\n end\n logger.debug \"hello\"\n end", "def run_query(terms)\n return Book.run_search(terms)\nend", "def search\n unless params[:search].blank?\n @search = Sunspot.search(KyuEntry) do\n fulltext params[:search]\n end\n @kyu = @search.results\n end\n end", "def search(word, options)\n session.options = adapt_to_dev_env(options)\n session.get(\"/publications?keyword=#{CGI.escape(word)}\")\n end", "def searching\n prepare_phrases.each do |phrase|\n searching_single(phrase)\n end\n end", "def search_on_filename\n needle = query.downcase.as_wiki_link\n all_pages.select { |name| name.downcase.include? needle }.map do |name|\n # unfreeze the String name by creating a \"new\" one\n SearchResult.new(name, 2 * @score, [0, name.tr('_', ' ')])\n end\n end", "def query(needle)\n rx = Regexp.new(needle, Regexp::IGNORECASE) # escaping?\n found = @index.find_all do |term, project|\n rx.match(term)\n end\n results = found.map do |term, project|\n # Yast::Logger#log -> Yast/Logger:log\n path = term.gsub(/::/, \"/\").sub(\"#\", \":\")\n # Term:empty? 
-> Term%3Aempty%3F\n path = Rack::Utils.escape_path(path)\n Result.new(term, \"http://www.rubydoc.info/#{project}/master/#{path}\")\n end\n results.sort_by(&:text)\n end", "def search(query)\n @search_svc.search query\n end", "def search word\n # No need to enclose the following in a EM.run block b/c TweetStream does this when \n # it initializes the client.\n puts \"entering\"\n q = last_search_query(word) \n current_search = next_search(word, q)\n #jump first one beacause of max_id including last one\n q = q.merge(current_search.next_results)\n current_search = next_search(word, q)\n puts \"#{current_search.attrs[:search_metadata]}\"\n while current_search.next_results? do\n current_search.results.each do |tweet|\n unless same_tweet(tweet)\n raw_tweet_to_tweet(tweet, word).save\n end\n end\n q = q.merge(current_search.next_results)\n current_search = next_search(word, q)\n end\n end", "def query(term, *opts)\n\n opts = {:count => 1, :ratio => 0.0}.merge(opts[0] || {})\n\n return [] if opts[:count] <= 0\n\n resp = JSON.parse(open(search_url term).read, :symbolize_names => true)\n\n resp[:list].map do |res|\n {\n :id => res[:defid],\n :word => res[:word],\n :author => res[:author],\n :permalink => res[:permalink],\n :definition => res[:definition].strip,\n :example => res[:example].strip,\n :upvotes => res[:thumbs_up],\n :downvotes => res[:thumbs_down]\n }\n end.keep_if do |d|\n d[:upvotes]/[d[:downvotes], 0.1].max.to_f >= opts[:ratio]\n end.take opts[:count]\n end", "def search(user, query, collection, wiki)\n end", "def find_words(words)\n search_results = SearchResults.new\n \n general = Vector.new\n must_match = Vector.new\n must_not_match = Vector.new\n not_found = false\n \n extract_words_for_searcher(words.join(' ')) do |word|\n case word[0]\n when ?+\n word = word[1,99]\n vector = must_match\n when ?-\n \t word = word[1,99]\n vector = must_not_match\n else\n \t vector = general\n end\n \n index = @dict.find(word.downcase)\n if index\n vector.add_word_index(index)\n else\n not_found = true\n \t search_results.add_warning \"'#{word}' does not occur in the documents\"\n end\n end\n \n if (general.num_bits + must_match.num_bits).zero? 
\n search_results.add_warning \"No valid search terms given\"\n elsif not not_found\n res = []\n @document_vectors.each do |entry, (dvec, mtime)|\n score = dvec.score_against(must_match, must_not_match, general)\n res << [ entry, score ] if score > 0\n end\n \n res.sort {|a,b| b[1] <=> a[1] }.each {|name, score|\n search_results.add_result(name, score)\n }\n \n search_results.add_warning \"No matches\" unless search_results.contains_matches\n end\n search_results\n end", "def search(search_terms)\n\n db = Sequel.sqlite(dbfilename)\n dataset = db[:pdfmd_documents].where(\"UPPER(keywords) LIKE UPPER('%#{search_terms[0]}%')\")\n result_files = ''\n dataset.all.each do |match_file|\n match_file.each do |key,value|\n if key == :keywords\n\n # Split the keywords\n keywords = value.downcase.split(/\\s*,\\s*/)\n # Search for matches in the keywords.\n if keywords.find{ |e| /#{search_terms.join(' ').downcase}/ =~ e }\n result_files += match_file[:filename] + \"\\n\"\n end\n end\n\n end\n end\n\n # Ouput result filenames\n result_files\n\n end", "def search(*args)\n search_internal([\"SEARCH\"], *args)\n end", "def search\n\t\t@articles = Article.where(\"text = ?\",params[:q])\n \n #Article.find_by_text(params[:q])\n \n #debug\n @articles.each do |article|\n puts article.title\n end\n \n \n\t\t#@articles = Article.where(:text => params[:q]) ' 1=1 -- '\n\n\t\t#@articles = Article.where(\"text = ?\", params[:q] )\n \n \n #TODO\n # add filter for other fields\n # Article.where(\"text = ? and title = ?\",params[:text],params[:title])\n \n # to add LIKE filter SQL : name like %aa%\n # \"name LIKE ? OR postal_code like ?\", \"%#{search}%\", \"%#{search}%\"\n \n end", "def search_by_keyword(query, o={})\n #debugger\n #debug \"[search_by_keyword] query = #{query}\"\n result = Sunspot.search(Item) do\n keywords query\n if o[:doc_only]\n without :itype_str, Item::ITYPE_CONCEPT#['query','concept','tag']\n end\n #debugger\n o.find_all{|k,v|k.to_s =~ /^facet\\_/}.each do |e|\n #debugger\n with (e[0].to_s.split('_')[1..-1].join('_')).to_sym, e[1] if [e[1]].flatten.first != '-1'\n end\n #debugger\n order_by(:basetime, :desc) if o[:order] == \"recency\" || query == TEXT_DUMMY\n paginate(:page => o[:page], :per_page => o[:per_page]) if o[:page]\n facet(o[:facet]) if o[:facet]\n without :hidden_flag, '1'\n end\n #debugger\n if o[:facet]\n result.facet(o[:facet]).rows\n elsif o[:raw]\n result\n else\n result_items = result.hits.map_with_index{|e,i|{:id=>e.instance.id, :rank=>(i+1), :score=>e.score}}\n @cv.add(:type=>'kwd', :query=>query, :created_at=>(o[:created_at] || Time.now), :history_id=>o[:history_id], :result=>result_items) if o[:add_context]\n result_items\n end\n end", "def article_match? 
(query, article_title)\n found = false\n return true if query.empty?\n temp_article = article_title.downcase\n query.each do |kw|\n pattern = Regexp.new /.*#{kw.downcase}.*/\n found = true if temp_article =~ pattern\n end\n found\nend", "def apply_search(results:)\n return results unless search_params.present?\n\n terms = search_params[:search_words] || ''\n return results unless terms.present?\n\n results.search(term: terms)\n end", "def search(query)\n\t\t query = \"%#{query}%\"\n\t\t name = arel_table[:name].matches(query)\n\t\t aliases = arel_table[:aliases].matches(query)\n\t\t where(name.or(aliases))\n\t\tend", "def search_word\n @words =\n if login?\n current_user.words.search params[:term]\n else\n Word.search params[:term]\n end\n end", "def find(search_string)\n result_array = []\n search_words = search_string.split(/\\s+/)\n\n # Loop over all entries in the index.\n @data.each{ |entry|\n begin\n # Check whether this entry matches the search words.\n score = 0\n search_words.each{ |search_word|\n next if search_word.empty?\n\n s = 2 * AE::LaunchUp::Scorer.score(search_word, entry[:name]) if entry[:name].is_a?(String)\n s += 2 * AE::LaunchUp::Scorer.score(search_word, entry[:description]) if entry[:description].is_a?(String)\n s += 2 * AE::LaunchUp::Scorer.score(search_word, entry[:category]) if entry[:category].is_a?(String)\n s += exact_matches(search_word, entry[:keywords].join(\" \"))/(entry[:keywords].length|1).to_f if entry[:keywords].is_a?(Array) && !entry[:keywords].empty?\n s += 2 * AE::LaunchUp::Scorer.score(search_word, entry[:keywords].join(\" \")) if entry[:keywords].is_a?(Array)\n s += exact_matches( search_word.gsub(/\\/|\\\\/, \"\"), entry[:file].gsub(/\\/|\\\\/, \"\") ) if entry[:file].is_a?(String) && search_word.length > 4\n\n # Skip if no match has been found.\n break score = 0.0 if s == 0.0\n score += s\n }\n\n # Tweaks for relevance:\n # Entries with icons match better with users's expectation,\n # urls or \"about\" rather not.\n score *= 3 if entry[:icon].is_a?(String)\n #score *= 0.5 if entry[:name][/about|paypal/i] || entry[:description][/http/]\n\n # Check wether the command is available in the current context. We don't\n # want to reject it completely from the search results, so that the user\n # won't miss it in an explicit search will. 
We give a hint if it's disabled.\n if entry[:validation_proc]\n status = nil\n begin\n status = entry[:validation_proc].call == MF_ENABLED\n rescue LocalJumpError => e\n # Validation proc contains a \"return\"?\n $stderr.write(\"Validation proc of '#{entry[:name]}' (#{entry[:id]}) contains 'return'\\n#{e.message.to_s}\\n#{e.backtrace.join(\"\\n\")}\" << $/)\n rescue Exception => e\n # Validation proc contains other bug.\n $stderr.write(\"Error in validation proc of '#{entry[:name]}' (#{entry[:id]})\\n#{e.message.to_s}\\n#{e.backtrace.join(\"\\n\")}\" << $/) if $VERBOSE\n end\n entry[:enabled] = status\n score *= 0.5 if status == false\n end\n\n # Skip if no match has been found.\n next if score < 1.0\n\n # Consider tracking data, how often this entry has been selected over others:\n # Divide track by half of average track (total_track/data.length).\n score += [entry[:track] / (@total_track|1).to_f * 0.5 * @data.length, 5].min if entry[:track]\n entry[:score] = score\n\n # Add it to results.\n result_array << entry\n rescue Exception => e\n $stderr.write(\"AE::LaunchUp::Index: Error in 'find' when searching '#{entry[:name]}' (#{entry[:id]})\\n#{e.message.to_s}\\n#{e.backtrace.join(\"\\n\")}\" << $/)\n break\n end\n }\n\n return result_array\n rescue Exception => e\n $stderr.write(\"AE::LaunchUp::Index: Error in 'find' when searching '#{search_string}'\\n#{e.message.to_s}\\n#{e.backtrace.join(\"\\n\")}\" << $/)\n return []\n end", "def search q\n @db.fetch('SELECT * FROM files WHERE path MATCH ?', q).all\n end", "def search\n\n end", "def search_phrase(search, admin)\n search = search.split(\" \")\n permissions = admin ? @@member + @@admin_only : @@member\n @results = []\n\n for col in permissions\n for word in search\n self.update_results(col, word)\n end\n end\n @results\n end", "def search(query)\n return [] if query.nil?\n\n @atoms = @storage.fetch(cleanup_atoms(query), query[/\\^/])\n queries = parse_query(query.dup)\n positive = run_queries(queries[:positive])\n positive_quoted = run_quoted_queries(queries[:positive_quoted])\n negative = run_queries(queries[:negative])\n negative_quoted = run_quoted_queries(queries[:negative_quoted])\n starts_with = run_queries(queries[:starts_with], true)\n start_quoted = run_quoted_queries(queries[:start_quoted], true)\n\n results = ActiveSupport::OrderedHash.new\n\n if queries[:start_quoted].any?\n results = merge_query_results(results, start_quoted)\n end\n\n if queries[:starts_with].any?\n results = merge_query_results(results, starts_with)\n end\n\n if queries[:positive_quoted].any?\n results = merge_query_results(results, positive_quoted)\n end\n\n if queries[:positive].any?\n results = merge_query_results(results, positive)\n end\n\n negative_results = (negative.keys + negative_quoted.keys)\n results.delete_if { |r_id, w| negative_results.include?(r_id) }\n results\n end", "def search_search_text\n query\n .where(localized_search_text_in(:title), text: \"%#{search_text}%\")\n .or(query.where(localized_search_text_in(:description), text: \"%#{search_text}%\"))\n end", "def search(search, admin)\n if !search.blank?\n @results = []\n if !search.strip.include? 
\" \"\n @results = self.search_keyword(search, admin)\n else\n @results = self.search_phrase(search, admin)\n end\n end\n end", "def search(params)\n filter_name, keywords, field_queries = extract params\n scope = filtered_by filter_name\n query = text_search keywords\n query = field_search field_queries, query\n scope.where query\n end", "def search; end", "def search(term)\n # pattern = Regexp.new(pattern, case_insensitive=true)\n # pattern = Regexp.new(pattern, Regexp::EXTENDED | Regexp::IGNORECASE)\n # pattern = Regexp.new(pattern)\n pattern = Regexp.new(term)\n select do |tweet|\n tweet.full_text =~ pattern\n end\n end", "def search_with_index query\n docs = []\n return docs if query.terms.empty?\n load if @content.nil?\n return docs if @content.nil?\n index = {}\n query.terms.each do |term|\n if term.operator == :eq && term.value.class != Regexp\n set = @attribute_storage[term.field][term.value]\n else\n set = @content.select do |doc|\n term.compare(doc.send(term.field))\n end\n end\n\n if !set.nil? && !set.empty?\n if docs.empty?\n docs = set\n if query.relation == :and\n docs.each do |value|\n index[value] = nil\n end\n end\n else\n if query.relation == :or\n docs += set\n else\n set.each do |value|\n if !index.has_key? value\n docs << value\n index[value] = nil\n end\n end\n end\n end\n end\n end\n docs\n end", "def query_words\n @query_words ||= Query.query_words @text\n end", "def search\n words = params[:query].split(/\\s+/).reject(&:blank?)\n url = nil\n\n case words.size\n when 1\n if words.first.starts_with?('@')\n user = find_users(words.first[1..-1]).only\n url = user_url(user) if user\n else\n project = find_projects(words[0]).only.try!(:sluggable)\n url = project_url(project) if project\n end\n when 2\n project = find_projects(words[0]).only.try!(:sluggable)\n env = find_environments(project, words[1]).only if project\n url = project_environment_bugs_url(project, env) if env\n when 3\n project = find_projects(words[0]).only.try!(:sluggable)\n env = find_environments(project, words[1]).only if project\n bug = env.bugs.find_by_number(words[2].to_i) if env\n url = project_environment_bug_url(project, env, bug) if bug\n when 4\n project = find_projects(words[0]).only.try!(:sluggable)\n env = find_environments(project, words[1]).only if project\n bug = env.bugs.find_by_number(words[2].to_i) if env\n occurrence = bug.occurrences.find_by_number(words[3].to_i) if bug\n url = project_environment_bug_occurrence_url(project, env, bug, occurrence) if occurrence\n end\n\n url ? render(text: url) : head(:ok)\n end", "def includes_all_words?(string, query)\n words = parse_words(string)\n query.all? { |pattern| words.any? { |word| word.ilike?(pattern) }}\n end", "def matches(query)\n @matches ||= search(query)\n end", "def perform\n raise ArgumentError, \"Need phrase to perform search\" unless @phrase\n uri = Addressable::URI.parse(FT.conf[:url])\n uri.query_values = {\n key: FT.conf[:key],\n phrase: @phrase,\n extension: ( @ext ? @ext : nil ),\n page: ( @page ? @page : nil )\n }\n\n http = Curl.get(uri.to_s)\n \n Crack::XML.parse(http.body_str)\n end", "def search(word)\n results = []\n @books.each {|item| results << item if item[0].downcase.include?(word) }\n results.empty? ? 
(puts \"Search returned no results...\") : (results.each {|item| puts \"#{item[0]} (#{item[1]})\"})\n end", "def search_search(exploits_array, query)\n search_results=[]\n exploits_array.each do |line|\n line = line.unpack('C*').pack('U*') if !line.valid_encoding?\n if query == 'nil'\n search_results << line\n else\n search_results << line if line =~ /#{query}/i\n end\n end\n return search_results\nend", "def search\n # query_param = params[:query].downcase\n # @found_articles = Article.all.where(\"lower(title) LIKE :query\", query: query_param)\n # render \"search\"\n end", "def search(query)\n return [] if query.nil?\n\n @atoms = @storage.fetch(cleanup_query_tokens(query), query[/\\^/])\n queries = parse_query(query.dup)\n positive = run_queries(queries[:positive])\n positive_quoted = run_quoted_queries(queries[:positive_quoted])\n negative = run_queries(queries[:negative])\n negative_quoted = run_quoted_queries(queries[:negative_quoted])\n starts_with = run_queries(queries[:starts_with], true)\n start_quoted = run_quoted_queries(queries[:start_quoted], true)\n\n results = {}\n\n if queries[:start_quoted].any?\n results = merge_query_results(results, start_quoted)\n end\n\n if queries[:starts_with].any?\n results = merge_query_results(results, starts_with)\n end\n\n if queries[:positive_quoted].any?\n results = merge_query_results(results, positive_quoted)\n end\n\n if queries[:positive].any?\n results = merge_query_results(results, positive)\n end\n\n negative_results = (negative.keys + negative_quoted.keys)\n results.delete_if { |r_id, w| negative_results.include?(r_id) }\n results\n end", "def search\n\t\tsearch_service = ApplicationService.get :SearchService\n\t\tquery_words = get_word_list params[:query]\n\t\tfiltered_query_words = []\n\t\tquery_words.each { |word| filtered_query_words << word unless search_service.is_stop_word word }\n\n\t\tlimit = params[:page_size] ? params[:page_size].to_i : 100\n\t\tpage = params[:page] ? 
params[:page].to_i - 1 : 0\n\t\toffset = page * limit\n\n\t\tresults = []\n\n\t\tif query_words.size > 0\n\t\t\tsearch_entries = SearchEntry.where('word REGEXP ?', query_words.join(\"|\"))\n\t\t\t\t\t\t\t\t\t\t.select(:product_id, :id)\n\t\t\t\t\t\t\t\t\t\t.select(\"sum(frequency) as total_frequency\")\n\t\t\t\t\t\t\t\t\t\t.group(:product_id)\n\t\t\t\t\t\t\t\t\t\t.order(frequency: :desc)\n\t\t\t\t\t\t\t\t\t\t.limit(limit)\n\t\t\t\t\t\t\t\t\t\t.offset(offset)\n\t\t\t\t\t\t\t\t\t\t.all\n\n\t\t\tsearch_entries.each do |entry|\n\t\t\t\tproduct = entry.product\n\t\t\t\tresults << {\n\t\t\t\t\tproduct_id: product.id,\n\t\t\t\t\tproduct_name: product.product_name,\n\t\t\t\t\tproduct_price: product.price,\n\t\t\t\t\tproduct_type: product.product_type,\n\t\t\t\t\tthumbnail: product.thumbnail\n\t\t\t\t}\n\t\t\tend\n\t\tend\n\n\t\tpayload = {\n\t\t\tresults: results\n\t\t}\n\n\t\trender status: 200, json: payload\n\tend", "def run\n searching if @nr_of_words >= @phrase_length\n end", "def search(str)\n return [] if str.to_s.empty?\n \n words = str.downcase.split(' ')\n pattern = Regexp.new(words.join('|'))\n matches = []\n\n pages.each do |page|\n if page.title.downcase =~ pattern\n matches << [page, []]\n \n elsif page.body.downcase =~ pattern\n matches << [page, highlight(page.html, words)]\n end\n end\n\n matches\n end", "def search(search_string)\n\n # Convert to a get-paramenter\n search_string = CGI.escapeHTML search_string\n search_string.gsub!(\" \", \"&nbsp;\")\n\n results = []\n \n return results\n end", "def search(term, opts = {})\n raise \"Index not found at path #{@index_path}\" unless File.exists? @index_path\n\n results = []\n\n query = make_query(term, opts[:exact])\n\n @index.execute(\"SELECT sequence_number, kanji, kana, senses, bm25(search) as score FROM search WHERE search MATCH ? 
LIMIT ?\", query, opts[:max_results]) do |row|\n entry = Entry.from_sql(row)\n score = 0.0\n\n is_exact_match = entry.kanji.include?(term) || entry.kana.include?(term)\n score = 1.0 if is_exact_match\n\n should_add = !opts[:exact] || (opts[:exact] && is_exact_match)\n\n # add the result\n results << [score, entry] if should_add\n end\n\n # Sort the results by first column (score) and return only the second column (entry)\n results.sort_by { |entry| -entry[0] }.map { |entry| entry[1] }\n end", "def query_fulltext_regexp( query )\n read_db do |dbm|\n dbm.each_value do |raw_val|\n val = RDictCcEntry.format_str(raw_val)\n match_line_found = false\n val.each_line do |line|\n if line =~ /^\\s+/\n if match_line_found\n puts line\n else\n # Skip lines starting with blanks, because these are already\n # translations and they don't belong to the matching line.\n next\n end\n else\n match_line_found = false\n end\n if line.downcase =~ /#{query}/\n puts line\n match_line_found = true\n end\n end\n end\n end\n end", "def my_include?(dict, query)\n dict.each do |valid_word|\n if valid_word == query\n return true\n end\n end\n return false\nend", "def search\n @recipes = Recipe.find_by_contents params[:query]\n end", "def search_text(\n query, case_sensitive: false, whole_sentence: true,\n limit: 10, skip: 0, sentence_limit: 80, top_result_only: false\n )\n results = search(\n query,\n case_sensitive: case_sensitive,\n whole_sentence: whole_sentence,\n limit: limit,\n skip: skip\n )\n\n results\n .map do |doc|\n yield(doc) if block_given?\n\n results = doc.search(\n query,\n case_sensitive: case_sensitive,\n whole_sentence: whole_sentence,\n sentence_limit: sentence_limit\n )\n\n # Only return result if its text has a match - compact is called below.\n next nil if results.empty?\n\n [doc.url, (top_result_only ? results.first : results)]\n end\n .compact\n .to_h\n end", "def search(\n query, case_sensitive: false, whole_sentence: true, limit: 10, skip: 0\n )\n query = query.to_s.strip\n query.replace('\"' + query + '\"') if whole_sentence\n\n # Sort based on the most search hits (aka \"textScore\").\n # We use the sort_proj hash as both a sort and a projection below.\n sort_proj = { score: { :$meta => 'textScore' } }\n query = { :$text => {\n :$search => query,\n :$caseSensitive => case_sensitive\n } }\n\n results = retrieve(:documents, query,\n sort: sort_proj, projection: sort_proj,\n limit: limit, skip: skip)\n return [] if results.count < 1 # respond_to? :empty? == false\n\n # results.respond_to? :map! 
is false so we use map and overwrite the var.\n results = results.map { |mongo_doc| Wgit::Document.new(mongo_doc) }\n results.each { |doc| yield(doc) } if block_given?\n\n results\n end", "def search_dictionary_entries(dictionary, query, opts = {})\n @transporter.read(\n :POST,\n path_encode('/1/dictionaries/%s/search', dictionary),\n { query: query },\n opts\n )\n end", "def search(*args)\n end", "def query(keyword)\n return unless keyword\n\n where(\"title ILIKE '%?%'\", keyword)\n end", "def search(query)\n alert_setup_incomplete && return unless is_setup_ok?\n client = get_client\n query = \"tag:#{query}\" if options[:tag]\n client.search query\n end", "def simple_search(query)\n search({:query => query})\n end", "def parse_search; end", "def search\n @users = User.search(@search_term)\n @tweets = Tweet.search(@search_term)\n @tags = Tag.search(@search_term)\n end", "def keyword\n\n @search_text = params[\"search_text\"] ||= \"\"\n\n add_breadcrumb \"Keyword Search: '#{@search_text}'\"\n\n if @search_text.blank? or @search_text.length < 2\n @keyword_search_results = KeywordSearchIndex.where(\"1 = 2\")\n else\n\n # here we build the query one clause at a time based on the input params. The query\n # is of the form:\n #\n # where organization_id IN (?) AND (search_text LIKE ? OR search_text_like ? OR ... )\n\n where_clause = 'organization_id IN (?) AND ('\n values = []\n # The organization is scoped to search across all objects that are owned by\n # the user's list of organizations\n orgs = @organization_list.dup\n # add org = 0 to get objects that are not indexed by org and are by\n # contract available to users of all organizations\n orgs << 0\n values << orgs\n\n search_params = []\n @search_text.split(\",\").each_with_index do |search_string|\n search_params << 'search_text LIKE ?'\n values << \"%#{search_string.strip}%\"\n end\n\n where_clause << search_params.join(' OR ')\n where_clause << ')'\n\n @keyword_search_results = KeywordSearchIndex.where(where_clause, *values)\n\n end\n\n @num_rows = @keyword_search_results.count\n cache_list(@keyword_search_results, INDEX_KEY_LIST_VAR)\n\n respond_to do |format|\n format.html\n format.json {\n render :json => {\n :total => @num_rows,\n :rows => data\n }\n }\n end\n\n end", "def query\n Riddle::Query.escape params[:search_txt]\n end", "def search(\n query, case_sensitive: false, whole_sentence: true, limit: 10, skip: 0\n )\n query = query.to_s.strip\n query.replace('\"' + query + '\"') if whole_sentence\n\n # Sort based on the most search hits (aka \"textScore\").\n # We use the sort_proj hash as both a sort and a projection below.\n sort_proj = { score: { :$meta => 'textScore' } }\n query = { :$text => {\n :$search => query,\n :$caseSensitive => case_sensitive\n } }\n\n results = retrieve(DOCUMENTS_COLLECTION, query,\n sort: sort_proj, projection: sort_proj,\n limit: limit, skip: skip)\n\n results.map do |mongo_doc|\n doc = Wgit::Document.new(mongo_doc)\n yield(doc) if block_given?\n doc\n end\n end", "def search(query, reverse = false)\n doc = fetch_parsed_response(query, reverse)\n doc && doc['status'] == \"OK\" ? 
doc : nil\n end", "def search(*rules); end", "def search(*rules); end", "def search\n end", "def search_by_name(query, full_text_search = false)\n query_word_regexps = query.split.map { |word| /#{word}/i }\n if full_text_search\n query_word_results_hash = {}\n updated_search_index.each_value do |word_spec_hash|\n word_spec_hash.each_pair do |word, spec_names|\n query_word_regexps.each do |query_word_regexp|\n set = (query_word_results_hash[query_word_regexp] ||= Set.new)\n set.merge(spec_names) if word =~ query_word_regexp\n end\n end\n end\n found_set_names = query_word_results_hash.values.reduce(:&)\n found_set_names ||= []\n sets = found_set_names.map do |name|\n aggregate.representative_set(name)\n end\n # Remove nil values because representative_set return nil if no pod is found in any of the sources.\n sets.compact!\n else\n sets = aggregate.search_by_name(query, false)\n end\n if sets.empty?\n extra = ', author, summary, or description' if full_text_search\n raise Informative, \"Unable to find a pod with name#{extra} \" \\\n \"matching `#{query}`\"\n end\n sorted_sets(sets, query_word_regexps)\n end", "def perfect_match\n query = @query.downcase.as_wiki_link\n page = all_pages.detect { |name| name.downcase == query }\n SearchResult.new(page, 1) if page\n end", "def search(query, notes_relation = @notes)\n pattern = \"%#{query}%\"\n notes_relation.where(Note.arel_table[:note].matches(pattern))\n end", "def search_words\n @search_words ||= Tokenizer.new(search_param).words_with_terminator\n end", "def fuzzy_match( query )\n return self.keywords.fuzzy_match(query)\n end", "def search \n\n end", "def custom_search(params_list)\n search_terms = params_list\n #print \"\\n\\n\\n***#{search_terms.size}***\\n\\n\\n\" ==> gives # of letters in the :query\n search_terms_array = search_terms.split(' ')\n \n ids = Array.new\n \n for word in search_terms_array\n word = word.singularize()\n isaColorWord = Color.find(:all, :conditions => [\"name = ?\", word])\n if isaColorWord.size > 0 #The size should only ever be 1\n ids.concat(Listing.connection.select_all(\"select distinct listings.id from listings\n inner join colors_listings on listings.id = colors_listings.listing_id\n where colors_listings.color_id = \\'\" + isaColorWord[0].id.to_s + \"\\'\").map{|i| i['id'].to_i})\n \n else\n p_word = '%' + word + '%'\n temp_ids = Listing.connection.select_all('select distinct listings.id \n from listings, fabrics, themes, patterns where \n listings.fabric_type = fabrics.id and \n listings.theme = themes.id and \n listings.pattern = patterns.id and \n concat(fabrics.fabric_type, \\'////\\', themes.name, \n \\'////\\', patterns.pattern_type, \\'////\\', listings.description) \n like \\'' + p_word + '\\'').map{|i| i['id'].to_i}\n if temp_ids.size > 0 \n ids.concat(temp_ids)\n else\n ids.concat(Listing.find(:all, :conditions => Listing.conditions_by_like(word)))\n end\n end\n \n end #for\n \n ids\n Listing.find(ids)\n #print \"\\n\\n\\n***293 #{ids.size}***\\n\\n\\n\"\n \n end", "def search_book(query)\n\n @book = Book.where(\"title like ?\" , query + '%')\n\n @book\n\n end", "def search\n @search = Sunspot.search(Job) do\n keywords(params[:searchterm])\n end\n end" ]
[ "0.7479644", "0.7378742", "0.72571397", "0.709236", "0.6955676", "0.69323564", "0.678248", "0.6759103", "0.6757562", "0.6754827", "0.67451084", "0.67448854", "0.67338794", "0.6714071", "0.66898596", "0.66889894", "0.6687218", "0.66858417", "0.66459876", "0.6641491", "0.6616201", "0.6571523", "0.655174", "0.6538715", "0.65334934", "0.65334934", "0.65327555", "0.65203846", "0.65143776", "0.65000814", "0.6494622", "0.6462789", "0.6460573", "0.64468616", "0.64359546", "0.6396256", "0.6384481", "0.63842154", "0.6381538", "0.6377543", "0.63514435", "0.63405836", "0.63377047", "0.63356113", "0.6329169", "0.6323664", "0.63204724", "0.63145894", "0.6310718", "0.63104904", "0.631013", "0.6305784", "0.6305672", "0.6298175", "0.6289368", "0.62838155", "0.6282283", "0.62813663", "0.6276928", "0.62740225", "0.6266037", "0.6254971", "0.6254856", "0.6253844", "0.62519485", "0.6229718", "0.62174094", "0.62063664", "0.6199718", "0.61979246", "0.61935353", "0.61920524", "0.61912477", "0.61853474", "0.6182431", "0.6176835", "0.617425", "0.6173966", "0.61714906", "0.6167996", "0.6167606", "0.616194", "0.6157455", "0.6156588", "0.61549526", "0.6146748", "0.61455286", "0.6142232", "0.61394846", "0.61394846", "0.613565", "0.6131774", "0.6127443", "0.61267257", "0.6126066", "0.61235464", "0.61221147", "0.61179084", "0.61096966", "0.610728" ]
0.67104805
14
Search Kanji entries matching the given query string
def search_kanji(query, limit = 10)
  # Keep only the kanji characters from the query; they become the FTS terms.
  tokens = query.chars.select { |c| c.kanji? }
  rows = []

  if tokens.present?
    @search_kanji ||= @database.prepare(SEARCH_KANJI_SQL)
    rows = @search_kanji.execute(tokens.join(' OR '), limit).to_a
  end

  # With no kanji in the query, rows stays [] and we return an empty list
  # instead of raising on nil.
  rows.map do |row|
    kanji = Kanji.decode(row['serialized'])
    kanji.character = row['character']
    kanji.id = row['id'].to_i
    kanji
  end
end
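# Usage sketch (editor's illustration, not part of the dataset record):
# exercises search_kanji above. It assumes the enclosing class wires up
# @database as a prepared-statement-capable handle (e.g. SQLite3::Database),
# defines SEARCH_KANJI_SQL, and provides the String#kanji? predicate used
# for tokenising; the Dictionary constant here is hypothetical.
dictionary = Dictionary.new
dictionary.search_kanji('日本語を勉強する', 5).each do |kanji|
  puts "#{kanji.character} (id: #{kanji.id})"
end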
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(query); end", "def search(query)\n @all.each_with_object([]) do |record, matches|\n matches << record if ((record['866']['t'] == query) || (record['866']['s'] == query))\n end\n end", "def search\n unless params[:search].blank?\n @search = Sunspot.search(KyuEntry) do\n fulltext params[:search]\n end\n @kyu = @search.results\n end\n end", "def search(term, opts = {})\n raise \"Index not found at path #{@index_path}\" unless File.exists? @index_path\n\n results = []\n\n query = make_query(term, opts[:exact])\n\n @index.execute(\"SELECT sequence_number, kanji, kana, senses, bm25(search) as score FROM search WHERE search MATCH ? LIMIT ?\", query, opts[:max_results]) do |row|\n entry = Entry.from_sql(row)\n score = 0.0\n\n is_exact_match = entry.kanji.include?(term) || entry.kana.include?(term)\n score = 1.0 if is_exact_match\n\n should_add = !opts[:exact] || (opts[:exact] && is_exact_match)\n\n # add the result\n results << [score, entry] if should_add\n end\n\n # Sort the results by first column (score) and return only the second column (entry)\n results.sort_by { |entry| -entry[0] }.map { |entry| entry[1] }\n end", "def search_search(exploits_array, query)\n search_results=[]\n exploits_array.each do |line|\n line = line.unpack('C*').pack('U*') if !line.valid_encoding?\n if query == 'nil'\n search_results << line\n else\n search_results << line if line =~ /#{query}/i\n end\n end\n return search_results\nend", "def index_by_keyword\n entries = Entry.all\n @selected_entries = Array.new\n @keyword = params[:keyword]\n entries.each do |entry|\n if /#{@keyword}/.match(entry.content) ||\n /#{@keyword}/.match(entry.tags) ||\n /#{@keyword}/.match(entry.title)\n @selected_entries << entry\n end\n end\n logger.debug \"hello\"\n end", "def parse_search(q)\n # TODO continue\n end", "def search(query, options = {}); end", "def query(keyword)\n return unless keyword\n\n where(\"title ILIKE '%?%'\", keyword)\n end", "def match_query(query); end", "def search\n # query_param = params[:query].downcase\n # @found_articles = Article.all.where(\"lower(title) LIKE :query\", query: query_param)\n # render \"search\"\n end", "def search\n\n end", "def search_words(query)\n query = query.downcase\n\n if query.contains_japanese?\n words = search_words_by_literals(query, 50)\n\n else\n words = search_words_by_senses(query, 50)\n\n if words.size <= 10\n extra_words = []\n\n extra_words += search_words_by_literals(query.hiragana, 20)\n extra_words += search_words_by_literals(query.katakana, 20)\n extra_words.sort! { |w1, w2| w1.score <=> w2.score }\n\n words += extra_words\n end\n end\n\n words\n end", "def search\n expose Challenge.search(@oauth_token, params[:keyword])\n end", "def search; end", "def search\n search_param = params[:term]\n matching_keys = UserKey.submitted.search(search_param).collect { |u| { value: \"#{u.name}\", data: u.id } }\n render json: { suggestions: matching_keys }\n end", "def search(query)\n\t\t query = \"%#{query}%\"\n\t\t name = arel_table[:name].matches(query)\n\t\t aliases = arel_table[:aliases].matches(query)\n\t\t where(name.or(aliases))\n\t\tend", "def search\n\t\t@articles = Article.where(\"text = ?\",params[:q])\n \n #Article.find_by_text(params[:q])\n \n #debug\n @articles.each do |article|\n puts article.title\n end\n \n \n\t\t#@articles = Article.where(:text => params[:q]) ' 1=1 -- '\n\n\t\t#@articles = Article.where(\"text = ?\", params[:q] )\n \n \n #TODO\n # add filter for other fields\n # Article.where(\"text = ? 
and title = ?\",params[:text],params[:title])\n \n # to add LIKE filter SQL : name like %aa%\n # \"name LIKE ? OR postal_code like ?\", \"%#{search}%\", \"%#{search}%\"\n \n end", "def search(word)\n \n end", "def search q\n @db.fetch('SELECT * FROM files WHERE path MATCH ?', q).all\n end", "def search_on_filename\n needle = query.downcase.as_wiki_link\n all_pages.select { |name| name.downcase.include? needle }.map do |name|\n # unfreeze the String name by creating a \"new\" one\n SearchResult.new(name, 2 * @score, [0, name.tr('_', ' ')])\n end\n end", "def keyword\n\n @search_text = params[\"search_text\"] ||= \"\"\n\n add_breadcrumb \"Keyword Search: '#{@search_text}'\"\n\n if @search_text.blank? or @search_text.length < 2\n @keyword_search_results = KeywordSearchIndex.where(\"1 = 2\")\n else\n\n # here we build the query one clause at a time based on the input params. The query\n # is of the form:\n #\n # where organization_id IN (?) AND (search_text LIKE ? OR search_text_like ? OR ... )\n\n where_clause = 'organization_id IN (?) AND ('\n values = []\n # The organization is scoped to search across all objects that are owned by\n # the user's list of organizations\n orgs = @organization_list.dup\n # add org = 0 to get objects that are not indexed by org and are by\n # contract available to users of all organizations\n orgs << 0\n values << orgs\n\n search_params = []\n @search_text.split(\",\").each_with_index do |search_string|\n search_params << 'search_text LIKE ?'\n values << \"%#{search_string.strip}%\"\n end\n\n where_clause << search_params.join(' OR ')\n where_clause << ')'\n\n @keyword_search_results = KeywordSearchIndex.where(where_clause, *values)\n\n end\n\n @num_rows = @keyword_search_results.count\n cache_list(@keyword_search_results, INDEX_KEY_LIST_VAR)\n\n respond_to do |format|\n format.html\n format.json {\n render :json => {\n :total => @num_rows,\n :rows => data\n }\n }\n end\n\n end", "def simple_search(query)\n search({:query => query})\n end", "def search \n\n end", "def index\n @cetak_bloks = params[:q] ? CetakBlok.search_for(params[:q]) : CetakBlok.all \n end", "def index\n filter = params[:searchString] || ''\n filter = filter.tr('^A-Za-zА-Яа-я0-9', '')\n if not filter.blank?\n @tags = Tag.where('lower(name) like ?', \"%#{filter.mb_chars.downcase.to_s}%\").order(:name)\n else\n @tags = Tag.all\n end\n end", "def search(word)\n Parser.new(query(word)).parse\n end", "def index\n @kai2_ji7s = Kai2Ji7.where('\"無齊記號\" LIKE ?','%*%*%*%*%*%').order('LENGTH(REPLACE(\"無齊記號\",\\'\\n\\',\\'\\')) ASC').limit(100)\n end", "def search\n end", "def search_songs(query)\n search('Songs', query)\n end", "def search_in(label, string)\n if !LABELS.include? 
label.to_sym\n raise ArgumentError, \"Unknown key: #{label}\"\n end\n\n find_all do |entry|\n text = entry.send(label).str\n text.match(/#{string}/i)\n end\n end", "def search\n\n # 検索条件設定\n conditions = KanseiBuhin.where(\"1 = ?\", 1)\n conditions = conditions.where(\"\\\"buhinNm\\\" LIKE ?\", params[:buhinNm] + \"%\") if params[:buhinNm] != \"\"\n conditions = conditions.where(\"\\\"katashikiCd\\\" >= ?\", params[:katashikiCdFrom].to_i) if params[:katashikiCdFrom] != \"\"\n conditions = conditions.where(\"\\\"katashikiCd\\\" <= ?\", params[:katashikiCdTo].to_i) if params[:katashikiCdTo] != \"\"\n conditions = conditions.where(\"\\\"katashikiNm\\\" LIKE ?\", params[:katashikiNm] + \"%\") if params[:katashikiNm] != \"\"\n\n logger.debug(conditions)\n\n records = conditions.count\n limit = params[:rows].to_i\n page = params[:page].to_i\n if records > 0\n n = records.quo(limit)\n total_pages = n.ceil\n else\n total_pages = 0\n end\n\n # 検索開始\n start = limit * page - limit;\n @kanseiBuhins = conditions.find(\n :all,\n :offset => start,\n :limit => limit,\n :order => \"\\\"buhinCd\\\"\")\n\n # 値の格納\n @responce = {\n total: total_pages.to_s,\n page: params[:page],\n records: records.to_s,\n rows: @kanseiBuhins\n }\n #logger.debug(@responce)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @responce }\n end\n end", "def index\n if params[:query].present?\n sql_query = \"name ILIKE :query OR syllabus ILIKE :query\"\n @schools = School.where(sql_query, query: \"%#{params[:query]}%\")\n else\n @schools = School.all\n end\n end", "def search(query)\n params = {\n query: query\n }.compact\n\n _get(\"/dns/search/keyword\", params) { |json| json }\n end", "def find\n\t\trender json: Subject.where(\"name LIKE ?\",\"%#{params[:term].titlecase}%\")\n\tend", "def blacklight_keyword_search_url(request, options = {})\n options[:format] ||= \"atom\"\n options[:content_format] ||= \"marc\"\n \n clauses = []\n\n # We need both title and author to search keyword style, or\n # we get too many false positives. Except serials we'll do\n # title only. sigh, logic tree. \n \n # Also need to use appropriate 'container' title if avail, not\n # article title. \n metadata = request.referent.metadata\n title = metadata['jtitle'] \n title = metadata['btitle'] if title.blank?\n title = metadata['title'] if title.blank?\n # remove sub-title for better search\n title.gsub!(/\\:.*\\Z/, '') if title\n\n author = get_top_level_creator(request.referent)\n return nil unless title && (author || (@bl_fields[\"serials_limit_clause\"] && title_is_serial?(request.referent)))\n # phrase search for title, just raw dismax for author\n # Embed quotes inside the quoted value, need to backslash-quote for CQL,\n # and backslash the backslashes for ruby literal. \n clauses.push(\"#{@bl_fields[\"title\"]} = \\\"\\\\\\\"#{escape_for_cql_double_quotes title}\\\\\\\"\\\"\") \n clauses.push(\"#{@bl_fields[\"author\"]} = \\\"#{escape_for_cql_double_quotes author}\\\"\") if author\n \n url = base_url + \"?search_field=#{@cql_search_field}&content_format=#{options[:content_format]}&q=#{CGI.escape(clauses.join(\" AND \"))}\"\n\n if (@bl_fields[\"serials_limit_clause\"] &&\n title_is_serial?(request.referent)) \n url += \"&\" + @bl_fields[\"serials_limit_clause\"]\n end\n \n return url\n end", "def search_by_zip_code\n zip_codes = []\n if not (term = params[:term].to_s.strip).blank? 
and term.to_i > 0\n zip_codes = ZipCode.find(:all, :conditions => [\"code = ?\", term])\n zip_codes.compact!\n end\n render :json => format_suggested_cities_by_zip_code(zip_codes).to_json\n end", "def search(project: nil, text: nil, kind: 'subject', tags: { }, page: 1, results: [])\n raise 'no project specified' unless project\n tag_query = tags.each_pair.collect{ |k, v| \"tags[#{ k }]=#{ v }\" }.join '&'\n uri = URI.parse \"https://api.zooniverse.org/projects/#{ project }/talk/search?text=#{ text }&kind=#{ kind }&#{ tag_query }&per_page=20&page=#{ page }\"\n req = Net::HTTP::Get.new uri.to_s\n http = Net::HTTP.new uri.host, uri.port\n http.use_ssl = true\n res = http.request req\n json = JSON.parse res.body\n \n pages = (json['total'] / json['per_page'].to_f).ceil\n \n # More than 1,000 results\n if page == 1 && pages > 50\n puts \"\\n\\nThis query has #{ json['total'] } results.\"\n puts \"It could take a long time and degrade server performance.\"\n puts \"Are you really sure you want to run this query? (y/n)\"\n return unless gets.chomp =~ /y/i\n end\n \n if json['page'] < pages\n puts \"#{ json['page'] } / #{ pages }\"\n search project: project, text: text, kind: kind, tags: tags, page: page + 1, results: results + json['results']\n else\n results + json['results']\n end\nend", "def suggestion_search\n keywords = params[:q]\n keywords = Utils.nomalize_string(keywords)\n\n if keywords.include? '-'\n keywords = keywords.gsub! '-', \" \"\n end\n\n pattern = /#{Regexp.escape(keywords)}/i\n\n users = User.or({:name => pattern}, {:email => pattern}).map { |user|\n UserSerializer.new(user).suggestion_search_hash\n }\n\n render json: users, root: false\n return\n end", "def search_text(query, text)\n text = pattern(text)\n query.where { title.ilike(text) | description.ilike(text) }\n end", "def find\n\t\tputs \"Enter keyword, EX: Mexican or Mex\"\n\t\tuserKeyword = gets.chomp.downcase.strip\n\t\trestaurants = Restaurant.saved_restaurants\n\t\tfound_restaurant_array = []\n\t\t\trestaurants.each do |rest|\n\t\t\tif rest.cuisine.include?(userKeyword)\n\t\t\t\tfound_restaurant_array << rest\n\t\t\tend\n\t\tend\n\t\tif !found_restaurant_array.empty?\n\t\toutput_restaurant_table(found_restaurant_array)\n\t\telse\n\t\t\toutput_action_footer(\"oops! could not find an entry\")\n\t\tend\n\tend", "def search(user, query, collection, wiki)\n end", "def index\n keyword = params[:keyword]\n if keyword.nil?\n @anns = Ann.all\n else\n @anns = Ann.where(\"name like ?\", \"%#{keyword}%\")\n end\n end", "def search\n @songs = Song.ransack(name_cont: params[:q]).result(distinct: true).limit(5)\n end", "def search\n # Query\n @q = params[:q]\n @query = @q.split(/\\s(?=(?:[^\"]|\"[^\"]*\")*$)/).map do |i|\n (i =~ /^(?:([A-Za-z_]+):)?\"?(.*?)\"?$/) ? [$2, $1] : [i, nil]\n end\n # Results\n return if (m = @project.miga).nil? or\n (r = m.result(:project_stats)).nil? or\n (db_file = r.file_path(:metadata_index)).nil?\n db = SQLite3::Database.new db_file\n @results = nil\n @query.each do |q|\n q[0] = q[0].downcase.gsub(/[^A-Za-z0-9\\-]+/, \" \")\n res = db.execute(\"select distinct(name) from metadata \" +\n \"where value like ? #{\"and field=?\" unless q[1].nil?}\",\n [\"% #{q[0]} %\"] + (q[1].nil? ? [] : [q[1]])).flatten\n @results.nil? ? @results=res :\n @results.select!{ |i| res.include? 
i }\n end\n reference_datasets\n end", "def search(query)\n @client.get('/BikePoint/Search', { query: query })\n end", "def search_keys(query)\n @storage.search_keys(query)\n end", "def search_dictionary_entries(dictionary, query, opts = {})\n @transporter.read(\n :POST,\n path_encode('/1/dictionaries/%s/search', dictionary),\n { query: query },\n opts\n )\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n end", "def search\n conditions = Kokyaku.where(\"\\\"delFlg\\\" = ?\", 0)\n conditions = conditions.where(\"\\\"kokyakuId\\\" >= ?\", params[:kokyaku][:kokyakuIdFrom].to_i) if params[:kokyaku][:kokyakuIdFrom] != \"\"\n conditions = conditions.where(\"\\\"kokyakuId\\\" <= ?\", params[:kokyaku][:kokyakuIdTo].to_i) if params[:kokyaku][:kokyakuIdTo] != \"\"\n conditions = conditions.where(\"\\\"kokyakuNm1\\\" LIKE ?\", \"%\" + params[:kokyaku][:kokyakuNm1] + \"%\") if params[:kokyaku][:kokyakuNm1] != \"\"\n conditions = conditions.where(\"\\\"kokyakuNm2\\\" LIKE ?\", \"%\" + params[:kokyaku][:kokyakuNm2] + \"%\") if params[:kokyaku][:kokyakuNm2] != \"\"\n conditions = conditions.where(\"\\\"kokyakuNmKana1\\\" LIKE ?\", \"%\" + params[:kokyaku][:kokyakuNmKana1] + \"%\") if params[:kokyaku][:kokyakuNmKana1] != \"\"\n conditions = conditions.where(\"\\\"kokyakuNmKana2\\\" LIKE ?\", \"%\" + params[:kokyaku][:kokyakuNmKana2] + \"%\") if params[:kokyaku][:kokyakuNmKana2] != \"\"\n conditions = conditions.where(\"\\\"seibetsu\\\" = ?\", params[:kokyaku][:seibetsu]) if params[:kokyaku][:seibetsu] != \"\"\n\n # 生年月日は「元号」「年」「月」「日」を連結して比較する\n adapter = Rails.configuration.database_configuration[Rails.env]['adapter']\n logger.debug(adapter)\n if adapter == \"sqlite3\" then\n # for sqlite\n tanjoDtCondition = str_sql_concat(\"SUBSTR('0'||\\\"tanjoGengo\\\",-1,1)\", \"SUBSTR('00'||\\\"tanjoYear\\\",-2,2)\", \"SUBSTR('00'||\\\"tanjoMonth\\\",-2,2)\", \"SUBSTR('00'||\\\"tanjoDay\\\",-2,2)\")\n else\n # for mysql、postgres\n tanjoDtCondition = str_sql_concat(\"LPAD(CAST(\\\"tanjoGengo\\\" AS char), 1, '0') \", \" LPAD(CAST(\\\"tanjoYear\\\" AS char), 2, '0') \", \" LPAD(CAST(\\\"tanjoMonth\\\" AS char), 2, '0') \", \" LPAD(CAST(\\\"tanjoDay\\\" AS char), 2, '0')\")\n end\n\n if adapter == \"mysql2\" then\n # for mysql\n strIntegerType = \"SIGNED\"\n else\n # for sqlite、postgres\n strIntegerType = \"INTEGER\"\n end\n\n if params[:kokyaku][:tanjoGengoFrom].present? || params[:kokyaku][:tanjoYearFrom].present? || params[:kokyaku][:tanjoMonthFrom].present? || params[:kokyaku][:tanjoDayFrom].present?\n tanjoGengoFrom = \"0\"\n tanjoYearFrom = \"00\"\n tanjoMonthFrom = \"00\"\n tanjoDayFrom = \"00\"\n\n if params[:kokyaku][:tanjoGengoFrom].present?\n tanjoGengoFrom = format(\"%01d\", params[:kokyaku][:tanjoGengoFrom])\n end\n if params[:kokyaku][:tanjoYearFrom].present?\n tanjoYearFrom = format(\"%02d\", params[:kokyaku][:tanjoYearFrom])\n end\n if params[:kokyaku][:tanjoMonthFrom].present?\n tanjoMonthFrom = format(\"%02d\", params[:kokyaku][:tanjoMonthFrom])\n end\n if params[:kokyaku][:tanjoDayFrom].present?\n tanjoDayFrom = format(\"%02d\", params[:kokyaku][:tanjoDayFrom])\n end\n\n tanjoDtFrom = (tanjoGengoFrom.to_s + tanjoYearFrom.to_s + tanjoMonthFrom.to_s + tanjoDayFrom.to_s).to_i\n conditions = conditions.where(\"CAST(\" + tanjoDtCondition + \" AS \" + strIntegerType + \" ) >= ?\", tanjoDtFrom)\n end\n\n if params[:kokyaku][:tanjoGengoTo].present? 
|| params[:kokyaku][:tanjoYearTo].present? || params[:kokyaku][:tanjoMonthTo].present? || params[:kokyaku][:tanjoDayTo].present?\n tanjoGengoTo = \"9\"\n tanjoYearTo = \"99\"\n tanjoMonthTo = \"99\"\n tanjoDayTo = \"99\"\n\n if params[:kokyaku][:tanjoGengoTo].present?\n tanjoGengoTo = format(\"%01d\", params[:kokyaku][:tanjoGengoTo])\n end\n if params[:kokyaku][:tanjoYearTo].present?\n tanjoYearTo = format(\"%02d\", params[:kokyaku][:tanjoYearTo])\n end\n if params[:kokyaku][:tanjoMonthTo].present?\n tanjoMonthTo = format(\"%02d\", params[:kokyaku][:tanjoMonthTo])\n end\n if params[:kokyaku][:tanjoDayTo].present?\n tanjoDayTo = format(\"%02d\", params[:kokyaku][:tanjoDayTo])\n end\n\n tanjoDtTo = (tanjoGengoTo.to_s + tanjoYearTo.to_s + tanjoMonthTo.to_s + tanjoDayTo.to_s).to_i\n conditions = conditions.where(\"CAST(\" + tanjoDtCondition + \" AS \" + strIntegerType + \" ) <= ?\", tanjoDtTo)\n end\n\n conditions = conditions.where(\"\\\"postNo\\\" LIKE ?\", params[:kokyaku][:postNo] + \"%\") if params[:kokyaku][:postNo] != \"\"\n conditions = conditions.where(\"address1 LIKE ?\", \"%\" + params[:kokyaku][:address1] + \"%\") if params[:kokyaku][:address1] != \"\"\n conditions = conditions.where(\"address2 LIKE ?\", \"%\" + params[:kokyaku][:address2] + \"%\") if params[:kokyaku][:address2] != \"\"\n conditions = conditions.where(\"tel1 LIKE ?\", params[:kokyaku][:tel1] + \"%\") if params[:kokyaku][:tel1] != \"\"\n conditions = conditions.where(\"tel2 LIKE ?\", params[:kokyaku][:tel2] + \"%\") if params[:kokyaku][:tel2] != \"\"\n conditions = conditions.where(\"fax LIKE ?\", params[:kokyaku][:fax] + \"%\") if params[:kokyaku][:fax] != \"\"\n conditions = conditions.where(str_sql_concat(\"COALESCE(sb1.\\\"shobyoNm\\\", '') \", \"COALESCE(sb2.\\\"shobyoNm\\\", '') \", \"COALESCE(sb3.\\\"shobyoNm\\\", '') \") + \" LIKE ?\", \"%\" + params[:kokyaku][:shobyoNm] + \"%\") if params[:kokyaku][:shobyoNm] != \"\"\n conditions = conditions.where(\"\\\"gakkoNm\\\" LIKE ?\", \"%\" + params[:kokyaku][:gakkoNm] + \"%\") if params[:kokyaku][:gakkoNm] != \"\"\n #logger.debug(conditions)\n\n\n # 検索に必要なSQL文を取得する\n select, joins = get_select_stmt\n\n records = conditions.count(:joins => joins)\n\n limit = params[:rows].to_i\n page = params[:page].to_i\n if records > 0\n n = records.quo(limit)\n total_pages = n.ceil\n else\n total_pages = 0\n end\n start = limit * page - limit;\n @kokyakus = conditions.find(\n :all,\n :select => select,\n :joins => joins,\n # :joins => \"LEFT OUTER JOIN shobyos shobyo2 ON shobyos.shobyoCd = kokyakus.shobyouCd2\",\n # :joins => \"LEFT OUTER JOIN shobyos shobyo3 ON shobyos.shobyoCd = kokyakus.shobyouCd3\",\n # :include => [:shobyo],\n :offset => start,\n :limit => limit,\n :order => \"\\\"kokyakuId\\\" DESC\")\n\n @responce = {\n total: total_pages.to_s,\n page: params[:page],\n records: records.to_s,\n rows: @kokyakus\n }\n #logger.debug(@responce)\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @responce }\n end\n end", "def search_for_towns_and_areas\n @towns = use_cache get_controller_action_key do\n Town.all.reject{|town| town.level != 13 && town.level != 1 }\n end\n\n respond_to do |format|\n q = params[:query].mb_chars.capitalize.to_s\n @towns = @towns.select{ |t| Regexp.new(\"^#{q}.*\") =~ t.title }\n format.json\n end\n\n end", "def search\n # if nothing is in the search field, show all champions\n if params[:search].blank?\n redirect_to champions_path\n \n #otherwise if the search matches show all champions that match\n elsif 
params[:search]\n @parameter = params[:search].downcase\n @search_results = Champion.all.where(\"lower(name) like ?\", \"#{@parameter}%\")\n \n #if the search doesnt match a champion\n else\n flash.alert = \"Could not find a champion with that query\"\n end\n end", "def search_params search\n search = Hash.new\n [:writing, :kana, :romaji, :def_de, :def_en, :def_fr].each do |field|\n search[field] = \"%#{params[:search]}%\"\n end\n search\n end", "def search\n\n end", "def search_by_keyword(keyword)\n j = search(keyword)\n j['b'].collect { |e| e['Id'] }.map { |e| e.downcase }\n end", "def search\n\tsearch_term = \"%#{params[:query].downcase}%\"\n\t# if using search engine\n\t# musicians = User.search search_term, :conditions => { :local_player_flag => 1 }, :order => :last_name\n\n\t# SQL Search (to be replaced by search engine, depends on hosting situation)\n\tmusicians = User.where(\"local_player_flag=1 and status_id<>1 and (cell_phone is not null or home_phone is not null) and (lower(username) like ? or lower(first_name) like ? or lower(last_name) like ? )\", search_term, search_term, search_term).order(\"last_name\").select(\"username, first_name, last_name, user_id, cell_phone, url, home_phone, image, image_url\")\n\n\t# JSON output\n\trender :json => musicians\n end", "def search\n user = User.qa_app_user\n text = URI.decode(params[:search])\n @chatouts = Qa.search_feed(user, Qa::GROUP_ID , text)\n end", "def search_words\n begin\n regex = Regexp.new(@pattern)\n rescue RegexpError => msg\n error_msg(msg)\n rescue NameError => msg\n error_msg(msg)\n end\n @results = DICT.select do |word|\n regex =~ word\n end\n @num_results = @results.length\n format_results\n display_results\n end", "def search\n\n \t@matches = []\n\n\t\tpattern = params[:search].downcase\n \tfind = Regexp.new(Regexp.quote(pattern))\n\t\t\n \tPlayer.all.each do |p|\t\t\t\n\t\t\tplayer_matches = false\n \tif p.name.downcase =~ find\n \t\tplayer_matches = true\n \t\t@matches << [p.name]\n \t\tbreak\n \tend\n end\n \n \trender :text => @matches.to_json()\n \tend", "def search\n data.select { |raw| raw.values.join(',').match?(/#{search_query}/) }\n end", "def index()\n @counter = 0\n searchword = params[:search]\n searchgenre = params[:selected_val]\n\n if searchword\n if searchgenre == \"内容\"\n @plans = Plan.where([\"content LIKE ?\", \"%#{searchword}%\"]) #コンテント用\n elsif searchgenre == \"ハッシュタグ\"\n @plans = Plan.where([\"content LIKE ?\", \"%##{searchword}%\"]) #ハッシュタグ用\n end\n render\n else\n @plans = Plan.all\n end\n end", "def search(q)\n url = build_query_url q\n begin\n raw_response = json(url)\n rescue JSON::ParserError\n Rails.logger.info \"Could not parse response as JSON. 
Request url: #{url}\"\n return []\n end\n parse_authority_response(raw_response)\n end", "def search keyword, opts = {}\n get '/v1/search/illust', {\n **SEARCH_DEFAULT_OPT,\n word: keyword,\n **opts\n }\n end", "def index\n\n @title = t(\"h2.categories\")\n @search = Category.search(params[:search])\n\n p @search\n unless params[:keyword].blank?\n p 222222222222222222222\n name = split_keyword(params[:keyword])\n \n @search.name_like = params[:keyword]\n # @search.or_content_like = name\n # @search.or_email_like = name\n end\n\n# Article.where((:name =~ 'Hello%') | (:content =~ 'Goodbye%')).to_sql\n\n\n @categories = @search.paginate(:page => params[:page])\n\n # @categories = Category.where(:name => '123').paginate(:page => params[:page] || 1, :per_page => params[:per_page] || 1)\n # @categories = Category.find(:all ,:conditions => [\"name like ? \" ,'%12%']).paginate(:page => params[:page] || 1, :per_page => params[:per_page] || 1)\n respond_to do |format|\n format.html # index.html.erb\n format.xml { render :xml => @categories }\n end\n end", "def do_search\n @search_text = params[:q]\n\n # Doctoring for the view to find matches:\n @q = @search_text\n @q.chop! if params[:q] =~ /\\*$/\n @q = @q[1..-1] if params[:q] =~ /^\\*/\n\n # TODO: we'll want some whitelist filtering here later:\n # params[:q] = \"#{@q}*\" unless params[:q] =~ /\\*$/ or params[:q] =~ /^[-+]/ or params[:q] =~ /\\s/\n params[:q] = I18n.transliterate(params[:q]).downcase\n\n # TODO: This search suggestions block is large; extract.\n\n # First step (and, yes, this will be slow—we will optimize later), look for\n # search suggestions that match the query:\n words = params[:q].split # TODO: we might want to remove words with ^-\n # TODO: we might also want to remove stopwords e.g.: https://github.com/brenes/stopwords-filter\n suggestions = []\n # YUCK! This is the best way to do this in Searchkick at the moment, though.\n # :S\n words.each do |word|\n word_search = SearchSuggestion.search(word, fields: [{ match: :exact }])\n suggestions += word_search.results if word_search.respond_to?(:results)\n end\n\n # If we only found one thing and they only asked for one thing:\n if suggestions.size == 1 && params[:q] !~ /\\s/\n Rails.logger.warn(\"One suggestion.\")\n # TODO: move this to a helper? It can't go on the model...\n suggestion = suggestions.first\n suggestion = suggestion.synonym_of if suggestion.synonym_of\n where = case suggestion.type\n when :page\n suggestion.page\n when :object_term\n term_records_path(uri: suggestion.object_term, object: true)\n when :path\n suggestion.path\n when :wkt_string\n flash[:notice] = \"Unimplemented, sorry.\"\n \"/\"\n end\n return redirect_to(where)\n elsif suggestions.size >= 2 && params[:q] =~ /\\s/\n Rails.logger.warn(\"Multiple suggestions.\")\n groups = suggestions.group_by(&:type)\n # Easier to handle:\n groups[:page] ||= []\n groups[:object_term] ||= []\n groups[:path] ||= []\n groups[:wkt_string] ||= []\n if groups[:page].size > 1\n Rails.logger.warn(\"Multiple PAGE suggestions.\")\n # We can't use suggestions if there's more than one species. 
Sorry.\n flash[:notice] = t(\"search.flash.more_than_one_species\",\n species: groups[:page].map(&:match).to_sentence)\n else\n Rails.logger.warn(\"0 or 1 Page suggestions.\")\n clade = groups[:page].try(:first).try(:page_id)\n Rails.logger.warn(\"Page suggestion: #{clade}\") if clade\n if groups[:object_term].size > 2\n Rails.logger.warn(\"Over two TERM suggestions.\")\n flash[:notice] = t(\"search.flash.more_than_two_terms\",\n terms: groups[:object_term].map(&:match).to_sentence)\n elsif groups[:path].size > 0\n Rails.logger.warn(\"...had PATH suggestions.\")\n flash[:notice] = t(\"search.flash.cannot_combine_paths\",\n path: groups[:path].map(&:match).to_sentence)\n else # NOTE: this assumes we only have OBJECT term suggestions, not predicates.\n Rails.logger.warn(\"Usable suggestions...\")\n (first, second) = groups[:object_term] # Arbitrary which is first...\n Rails.logger.warn(\"First term: #{first.object_term}\")\n Rails.logger.warn(\"Second term: #{second.object_term}\") if second\n return redirect_to(term_records_path(uri: first.object_term, object: true,\n and_object: second.try(:object_term), clade: clade))\n end\n end\n end\n\n @clade = if params[:clade]\n puts \"*\" * 100\n puts \"** Filtering by clade #{params[:clade]}\"\n # It doesn't make sense to filter some things by clade:\n params[:only] = if params[:only]\n Array(params[:only]) - [:collections, :users, :predicates, :object_terms]\n else\n [:pages, :media]\n end\n puts \"Only param should now be: #{params[:only]}\"\n Page.find(params[:clade])\n else\n nil\n end\n\n default = params.has_key?(:only)? false : true\n @types = {}\n [ :pages, :collections, :articles, :images, :videos, :videos, :sounds, :links, :users, :predicates, :object_terms ].\n each do |sym|\n @types[sym] = default\n end\n\n @types[params[:only].to_sym] = true if params.has_key?(:only)\n\n # if params.has_key?(:only)\n # Array(params[:only]).each { |type| @types[type.to_sym] = true }\n # elsif params.has_key?(:except)\n # Array(params[:except]).each { |type| @types[type.to_sym] = false }\n # end\n\n # NOTE: no search is performed unless the @types hash indicates a search for\n # that class is required:\n\n @pages = if @types[:pages]\n fields = %w[preferred_vernacular_strings^20 vernacular_strings^20 preferred_scientific_names^10 scientific_name^10 synonyms^10 providers resource_pks]\n match = words.size == 1 ? :text_start : :phrase\n basic_search(Page, boost_by: [:page_richness, :specificity, :depth], match: match, fields: fields,\n where: @clade ? { ancestry_ids: @clade.id } : nil,\n includes: [:preferred_vernaculars, :medium, { native_node: { node_ancestors: :ancestor } }])\n else\n nil\n end\n\n\n @collections = if @types[:collections]\n basic_search(Collection, fields: [\"name^5\", \"description\"])\n else\n nil\n end\n\n @articles = if @types[:articles]\n basic_search(Searchkick,\n fields: [\"name^5\", \"resource_pk^10\", \"owner\", \"description^2\"],\n where: @clade ? { ancestry_ids: @clade.id } : nil,\n index_name: [Article])\n else\n nil\n end\n\n @images = if @types[:images]\n media_search(\"image\")\n else\n nil\n end\n\n @videos = if @types[:videos]\n media_search(\"video\")\n else\n nil\n end\n\n @sounds = if @types[:sounds]\n media_search(\"sound\")\n else\n nil\n end\n\n # @links = if @types[:links]\n # basic_search(Searchkick,\n # fields: [\"name^5\", \"resource_pk^10\", \"owner\", \"description^2\"],\n # where: @clade ? 
{ ancestry_ids: @clade.id } : nil,\n # index_name: [Link])\n # else\n # nil\n # end\n\n @users = if @types[:users]\n basic_search(User, fields: [\"username^6\", \"name^4\", \"tag_line\", \"bio^2\"])\n else\n nil\n end\n\n Searchkick.multi_search([@pages, @articles, @images, @videos, @sounds, @collections, @users].compact)\n\n @pages = PageSearchDecorator.decorate_collection(@pages) if @pages\n @articles = ArticleSearchDecorator.decorate_collection(@articles) if @articles\n @images = ImageSearchDecorator.decorate_collection(@images) if @images\n @videos = VideoSearchDecorator.decorate_collection(@videos) if @videos\n @sounds = SoundSearchDecorator.decorate_collection(@sounds) if @sounds\n @collections = CollectionSearchDecorator.decorate_collection(@collections) if @collections\n @users = UserSearchDecorator.decorate_collection(@users) if @users\n\n # if @types[:predicates]\n # @predicates_count = TraitBank.count_predicate_terms(@q)\n # # NOTE we use @q here because it has no wildcard.\n # @predicates = Kaminari.paginate_array(\n # TraitBank.search_predicate_terms(@q, params[:page], params[:per_page]),\n # total_count: @predicates_count\n # ).page(params[:page]).per(params[:per_page] || 50)\n # end\n #\n # if @types[:object_terms]\n # @object_terms_count = TraitBank.count_object_terms(@q)\n # # NOTE we use @q here because it has no wildcard.\n # @object_terms = Kaminari.paginate_array(\n # TraitBank.search_object_terms(@q, params[:page], params[:per_page]),\n # total_count: @object_terms_count\n # ).page(params[:page]).per(params[:per_page] || 50)\n # end\n\n respond_to do |fmt|\n fmt.html do\n @page_title = t(:page_title_search, query: @q)\n end\n\n fmt.js { }\n\n # TODO: JSON results for other types! TODO: move; this is view logic...\n fmt.json do\n render json: JSON.pretty_generate(@pages.results.as_json(\n except: :native_node_id,\n methods: :scientific_name,\n include: {\n preferred_vernaculars: { only: [:string],\n include: { language: { only: :code } } },\n # NOTE I'm excluding a lot more for search than you would want for\n # the basic page json:\n top_media: { only: [ :id, :guid, :owner, :name ],\n methods: [:small_icon_url, :medium_icon_url],\n include: { provider: { only: [:id, :name] },\n license: { only: [:id, :name, :icon_url] } } }\n }\n ))\n end\n end\n end", "def index\n @q = Hotel.ransack(search_params)\n\n search_params.each_pair { |key, value|\n @key = \"#{key}\"\n @value = \"#{value}\"\n break # 先頭の条件のみ\n }\n\n sql = @q.result.to_sql\n if params[:q][\"s\"] then\n sql = sql + \" order by \"+ params[:q][\"s\"]\n sql.gsub!(\"\\\"\",\"\")\n else\n sql = sql + \" order by updated_at desc limit 10\"\n end\n @hotels = Hotel.find_by_sql(sql)\n # if @hotels then\n # @message = \"お探しの民泊施設は見つかりませんでした\"\n # end\n # @hotels = Hotel.all\n @comment = Comment.new\n end", "def search(word)\r\n \r\n end", "def search(query='*', opts={})\n\n defaults = {\n :search_category => 'all',\n :load_search_page => true\n }\n \n opts = defaults.merge(opts)\n\n if (opts[:load_search_page])\n @request.add('/var/widgets.json?callback=define')\n @request.add('/system/me?_charset_=utf-8')\n @request.add('/var/templates/worlds.2.json?_charset_=utf-8')\n end\n \n case opts[:search_category]\n when 'all'\n @request.add(\"/var/search/general.json?q=#{query}&tags=&sortOn=_lastModified&sortOrder=desc&page=0&items=18&_charset_=utf-8&_=1342558141063\",\n {}, { 'subst' => 'true' })\n when 'content'\n 
@request.add(\"/var/search/pool/all.infinity.json?q=#{query}&tags=&sortOn=_lastModified&sortOrder=desc&page=0&items=18&_charset_=utf-8&_=1342558155346\",\n {}, { 'subst' => 'true' })\n when 'people'\n @request.add(\"/var/search/users.infinity.json?q=#{query}&tags=&sortOn=_lastModified&sortOrder=desc&page=0&items=18&_charset_=utf-8&_=1342558158607\",\n {}, { 'subst' => 'true' })\n when 'groups'\n @request.add(\"/var/search/groups.infinity.json?q=#{query}&tags=&sortOn=_lastModified&sortOrder=desc&category=group&page=0&items=18&_charset_=utf-8&_=1342558161747\",\n {}, { 'subst' => 'true' })\n when 'courses'\n @request.add(\"/var/search/groups.infinity.json?q=#{query}&tags=&sortOn=_lastModified&sortOrder=desc&category=course&page=0&items=18&_charset_=utf-8&_=1342558164687\",\n {}, { 'subst' => 'true' })\n when 'research_projects'\n @request.add(\"/var/search/groups.infinity.json?q=#{query}&tags=&sortOn=_lastModified&sortOrder=desc&category=research&page=0&items=18&_charset_=utf-8&_=1342558167607\",\n {}, { 'subst' => 'true' })\n end\n \n if (opts[:load_search_page])\n @request.add('/system/batch?_charset_=utf-8&requests=%5B%7B%22url%22%3A%22%2Fdevwidgets%2Fdisplayprofilesection%2Fdisplayprofilesection.html%22%2C%22method%22%3A%22GET%22%2C%22_charset_%22%3A%22utf-8%22%7D%2C%7B%22url%22%3A%22%2Fdevwidgets%2Fdisplayprofilesection%2Fbundles%2Fdefault.properties%22%2C%22method%22%3A%22GET%22%2C%22_charset_%22%3A%22utf-8%22%7D%5D&_=1324426870134')\n @request.add('/system/batch?_charset_=utf-8&requests=%5B%7B%22url%22%3A%22%2Fdevwidgets%2Fprofilesection%2Fprofilesection.html%22%2C%22method%22%3A%22GET%22%2C%22_charset_%22%3A%22utf-8%22%7D%2C%7B%22url%22%3A%22%2Fdevwidgets%2Fprofilesection%2Fbundles%2Fdefault.properties%22%2C%22method%22%3A%22GET%22%2C%22_charset_%22%3A%22utf-8%22%7D%5D&_=1324426870481')\n end\n \n end", "def search(q:, raw: false)\n return unless String(q).length > 10\n\n result = Geocoder.search(q)\n raw ? result&.data : format_result(result)\n end", "def linear_search(input)\n \n search_name = input.downcase.split(' ')\n search_name.each do |name_el|\n \n entries.each do |entry|\n \n name_array = entry.name.downcase.split(' ')\n \n if name_array.include?(name_el)\n return entry\n end\n end\n end\n return nil\n end", "def index\n key = params[:key].try(:strip)\n search = params[:search].try(:strip)\n \n # if no search term is present\n if search.blank? || key.blank?\n @records = current_user.records.order(\"updated_at desc\").paginate(:page => params[:page])\n # else perform full text search\n else\n @records = current_user.records.where(\"to_tsvector(jsonb_extract_path_text(json, ?)) @@ plainto_tsquery(?)\", key, search).order(\"updated_at desc\").paginate(:page => params[:page])\n # save key to cookie\n session[:search_key] = key\n end\n end", "def url_search\n params[:q].gsub(\"*\", \"%2A\")\n end", "def search(term)\n term ||= ''\n normalized_term = term.downcase.strip\n indexes = INVERTED_INDEX[normalized_term]\n indexes ? ISO_639_2.values_at(*indexes).uniq : []\n end", "def search(term)\n term ||= ''\n normalized_term = term.downcase.strip\n indexes = INVERTED_INDEX[normalized_term]\n indexes ? 
ISO_639_2.values_at(*indexes).uniq : []\n end", "def search\n\n \tparser = AddressParser.new\n \taddress_hash = {}\n \t\n \tunless params[:location].nil?\n\t \tif is_number?(params[:location]) then \n\t \t\t# zip code\n\t\t \taddress_hash = parser.zip.parse(params[:location])\n\t\telsif params[:location].length == 2 then\n\t\t\t# state only\n\t\t\tparams[:location] = params[:location].strip.upcase\n\t\t\taddress_hash = parser.state.parse(params[:location])\n\t\telse\n\t\t\t# city, state, zip\n\t\t\tbegin address_hash = parser.csz.parse(params[:location])\n\t\t\t\trescue\n\t\t\t\t# city\n\t\t\t \taddress_hash = parser.city1.parse(params[:location]) unless params[:location].nil?\n\t\t\tend \n\t\tend\n\tend\n\t\n\t# new hash for search keys\n\tskeys = {}\n\t\n\tif address_hash[:city] then \n\t\tskeys[\"CIT\"] = /#{address_hash[:city]}/i\n\t\tparams['city'] = address_hash[:city]\n\tend\n\tif address_hash[:city1] then \n\t\tskeys[\"CIT\"] = /#{address_hash[:city1]}/i\n\t\tparams['city'] = address_hash[:city]\n\tend\n \tif address_hash[:state] then \n \t\tskeys[\"STATE\"] = \"#{address_hash[:state]}\" \n \t\tparams['state'] = address_hash[:state]\n \tend\n \tif address_hash[:zip] then \n \t\tskeys[\"ZP\"] = address_hash[:zip].to_s \n \t\t#params['zip'] = CGI::escape(address_hash[:zip])\n \t\tparams['zip'] = address_hash[:zip]\n \tend\n \t\n \tif(params.has_key?('bathrooms') ) then\n \t\tskeys[\"BTH\"] = \"#{params[:bathrooms].to_f}\"\n \tend\n \t\n \tif(params.has_key?('bedrooms') ) then\n \t\tskeys[\"BR\"] = params[:bedrooms].to_i\n \tend\n\n \tif(params.has_key?('minyear') ) then\n \t\tskeys[\"BLT-MIN\"] = params[:minyear].to_i\n \tend\n \t\n \tif(params.has_key?('maxyear') ) then\n \t\tskeys[\"BLT-MAX\"] = params[:minyear].to_i\n \tend\n \t\n \tif(params.has_key?('price_low') ) then\n \t\tskeys[\"LP-MIN\"] = params[:price_low].to_i\n \tend\n \t\n \tif(params.has_key?('price_high') ) then\n \t\tskeys[\"LP-MAX\"] = params[:price_high].to_i\n \tend\n \t\n \tif(params.has_key?('zipcode') ) then\n \t\tskeys[\"ZP\"] = params[:zipcode]\n \tend\n\t\n\tquery = {}\n\n\tskeys.each do |key, value|\n\t\tcase key\n\t\t\t when 'BTH'\n\t\t\t query.merge!({ key.to_sym.gte => value.to_f })\n\t\t\t when 'BR'\n\t\t\t query.merge!({ key.to_sym.gte => value.to_i })\n\t\t\t when 'BLT-MIN'\n\t\t\t \tif value.to_i > 0 then\n\t\t\t \t\tquery.merge!({ 'BLT'.to_sym.gte => value.to_i })\n\t\t\t \tend\n\t\t\t when 'BLT-MAX'\n\t\t\t \tif value.to_i > 0 then\n\t\t\t \t\tquery.merge!({ 'BLT'.to_sym.lte => value.to_i })\n\t\t\t \tend\n\t\t\t when 'LP-MIN'\n\t\t\t \tif value.to_i > 0 then\n\t\t\t \t\tquery.merge!({ 'LP'.to_sym.gte => value.to_i })\n\t\t\t \tend\n\t\t\t when 'LP-MAX'\n\t\t\t \tif value.to_i > 0 then\n\t\t\t \t\tquery.merge!({ 'LP'.to_sym.lte => value.to_i })\n\t\t\t \tend\n\t\t\t when 'ZP'\n\t\t\t \tif value != \"\" then\n\t\t\t \t\tquery.merge!({ 'ZP'.to_sym => value })\n\t\t\t \tend\n\t\t\t when 'CIT', 'STATE', 'ZP'\n\t\t\t query.merge!({ key.to_sym => value })\n\t\t end\n\tend\n\t\t\n \t@listings = Listing.where( query ).paginate({\n\t\t :sort => :LP.desc,\n\t\t :per_page => 10, \n\t\t :page => params[:page],\n\t\t})\n \t\n \trender :template => 'find/search', :collection => @listings\n \t \t\n end", "def search_results\n @keyword = params[:keyword]\n \n @products = Product.where(\"name LIKE ?\", \"%#{@keyword}%\")\n end", "def search\n search_params = \"%#{params[:search_params]}%\"\n @code_programs = CodeProgram.where(\"city like ? OR keywords like ? OR mission_description like? 
OR organization_name like?\", search_params, search_params, search_params, search_params)\n puts \"yo\"\n p @code_programs\n render :index\n end", "def search_process\n @search_text =params[:q].to_s\n all =params[:all].to_s\n exact =params[:exact].to_s\n any =params[:any].to_s\n none =params[:none].to_s\n advanced_query=\"\"\n\n if all != \"\"\n all =all.split(' ')\n all_like =all.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\n all_like =all_like.join(' and ')\n advanced_query=all_like\n end\n\n if exact != \"\" && all != \"\"\n exact =\"'%\"+exact+\"%'\"\n advanced_query = advanced_query + \" and keyword like \" + exact\n end\n\n if exact != \"\" && all == \"\"\n exact =\"'%\"+exact+\"%'\"\n advanced_query = \"keyword like \" + exact\n end\n\n if any != \"\" and (all != \"\" or exact != \"\")\n any =any.split(' ')\n any_like =any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\n any_like =any_like.join(' or ')\n advanced_query = advanced_query + \" and (\" + any_like + \")\"\n end\n\n if any != \"\" and all == \"\" and exact == \"\"\n any =any.split(' ')\n any_like =any.map { |x| \"keyword like \" + \"'%\" + x + \"%'\" }\n any_like =any_like.join(' or ')\n advanced_query = \"(\" + any_like + \")\"\n end\n\n if none != \"\" and (all != \"\" or exact != \"\" or any != \"\")\n none =none.split(' ')\n none_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\n\n none_not_like=none_not_like.join(' and ')\n\n advanced_query=advanced_query + \" and \" + none_not_like\n\n end\n\n if none != \"\" and all == \"\" and exact == \"\" and any == \"\"\n none =none.split(' ')\n none_not_like=none.map { |x| \"keyword not like \" + \"'%\" + x + \"%'\" }\n\n none_not_like=none_not_like.join(' and ')\n\n advanced_query= none_not_like\n end\n\n\n advanced_query = \"SELECT Model_ID FROM keyword_symbol_tables WHERE \"+advanced_query\n\n parameter_search_text=@search_text.split.join(\" \")\n keyword_array =parameter_search_text.split(' ')\n keyword_count =keyword_array.size\n\n connection = ActiveRecord::Base.connection\n\n if all != \"\" or exact != \"\" or any != \"\" or none != \"\"\n @resultset = connection.execute(\"#{advanced_query}\");\n else\n @resultset = connection.execute(\"call keyword_search('#{parameter_search_text}',#{keyword_count})\");\n end\n\n ActiveRecord::Base.clear_active_connections!\n\n @resultset_strings = @resultset.map { |result| result.to_s.gsub(/[^0-9A-Za-z]/, '') }\n\n @model_ids =Array.new\n @model_names =Array.new\n @model_types =Array.new\n\n @resultset_strings.each do |result|\n\n substring=result[0..4]\n\n if substring == \"NMLCL\"\n cell=Cell.find_by_Cell_ID(result.to_s)\n name=cell.Cell_Name\n type=\"Cell\"\n end\n\n if substring == \"NMLCH\"\n channel=Channel.find_by_Channel_ID(result.to_s)\n name =channel.Channel_Name\n type =\"Channel\"\n end\n\n\n if substring == \"NMLNT\"\n network=Network.find_by_Network_ID(result.to_s)\n name =network.Network_Name\n type =\"Network\"\n end\n\n if substring == \"NMLSY\"\n synapse=Synapse.find_by_Synapse_ID(result.to_s)\n name =synapse.Synapse_Name\n type =\"Synapse\"\n end\n\n @model_ids.push(result)\n @model_names.push(name)\n @model_types.push(type)\n\n end\n\n if @model_ids.count != 0\n\n render :partial => 'keyword_results_list',\n :locals => {\n :model_ids => @model_ids,\n :model_names => @model_names,\n :model_types => @model_types\n }\n\n else\n\n render :partial => 'no_results'\n\n end\n\n end", "def search\n Api.search_all_apis params[:postcode]\n end", "def search(plaintext)\n call(:search, 
:plaintext => plaintext)[:search_response][:return]\n end", "def name_search # :nologin: :norobots:\n pattern = params[:pattern].to_s\n if pattern.match(/^\\d+$/) and\n (name = Name.safe_find(pattern))\n redirect_to(:action => 'show_name', :id => name.id)\n else\n query = create_query(:Name, :pattern_search, :pattern => pattern)\n @suggest_alternate_spellings = pattern\n show_selected_names(query)\n end\n end", "def condicion_search(pparams, phash, pmodel)\n cp = 0\n pv = 0\n cad = ' '\n\n pparams.each do |k,v|\n if (k != 'utf8' and k != 'action' and k != 'controller' and k != 'srchmodel' and k != 'page') \n\t\n\t if v.rstrip != ''\n\t\t\n\t \tif pv > 0\n\t\t\tcad += \" AND \"\n\t\tend\n\t\t\n\t\tif k.to_s == \"searchfield\"\n\t\t \n \t\t if es_entero(v)\n\t\t \tif pmodel.hsearch_fields[0]==\"codigo\" or pmodel.hsearch_fields[0]==\"cliente_cod\"\n\t\t\t cad += pmodel.hsearch_fields[0]+\" = '#{v}' \"\n\t\t\telse\n\t\t\t cad += pmodel.hsearch_fields[0]+\" = #{v} \"\n\t\t\tend\n\t\t else\n\t\t sr = v.to_s\n\t\t\tif pmodel.hsearch_fields[0]==\"codigo\"\n\t\t\t cad += pmodel.hsearch_fields[1]+\" LIKE '%#{sr.capitalize}%'\"\t\n\t\t\telse \n\t\t\t cad += pmodel.hsearch_fields[1]+\"||to_char(\"+pmodel.hsearch_fields[0]+\",'99999') LIKE '%#{sr.capitalize}%'\"\t\n\t\t\tend\t \n\t\t end\n\t\n\t\telse #si no es searchfield\n\t \t\n \t\t tipo = phash[k].type\n\t\t case tipo\n\t\t when :string, :text\n\t\t sr = v.to_s\n\t\t\t if k.to_s == \"codigo\" or k.to_s == \"cliente_cod\" \n\t\t\t\t cad += \"upper(\"+k + \") = '#{sr.upcase}'\"\n\t\t\t else\n\t\t\t\t cad += \"upper(\"+k + \") like '%#{sr.upcase}%'\" \n\t\t\t end\t\t\n\t\t when :date, :datetime, :timestamp\n\t\t\t cad += k + \" = '#{v}'\"\n\t\t else\n\t\t\t cad += k + \" = #{v} \"\n\t end\n\t\tend #del searchfield\n\t\tpv += 1\n\t end \n\t cp += 1\n end\n end #del each\n \n if cp == 0\n \" 1 = 0\"\n else\n if pv == 0\n\t \" 1 = 1 \"\n else\n\t cad\n end \n end \nend", "def search\n begin\n words= params[:search][:qw].strip\n @query_info=words\n\n # search in the descriptions\n @videos=VMetadata.search(words, :page => params[:page], :per_page => @@per_page,\n :match_mode => :any, :rank_mode => :proximity_bm25)\n\n respond_to do |format|\n format.html { render 'query/show' }\n end\n rescue ActiveRecord::RecordNotFound\n render(:file => \"#{Rails.root}/public/404.html\",\n :status => \"404 Not Found\")\n end\n end", "def prefix_search\n @tags = Tag.where(\"name LIKE :prefix\", prefix: \"#{params.permit(:s)[:s]}%\")\n render json: @tags.collect{|tag| tag.strip}\n end", "def search(q)\n results = []\n url = \"https://#{@subdomain}.sharefile.com/rest/search.aspx?op=search&query=#{q}&authid=#{@authid}&fmt=json\"\n response = JSON.parse(open(url).read)\n if response[\"error\"] == false #success\n response[\"value\"].each do |item|\n if item[\"type\"] == \"folder\"\n results << Folder.new(item[\"id\"], @authid, @subdomain, false, item)\n elsif item[\"type\"] == \"file\"\n results << File.new(item[\"id\"], @authid, @subdomain, item)\n end\n end\n return results\n else #error\n return response\n end\n end", "def search(query)\n alert_setup_incomplete && return unless is_setup_ok?\n client = get_client\n query = \"tag:#{query}\" if options[:tag]\n client.search query\n end", "def search_by_keyword(query, o={})\n #debugger\n #debug \"[search_by_keyword] query = #{query}\"\n result = Sunspot.search(Item) do\n keywords query\n if o[:doc_only]\n without :itype_str, Item::ITYPE_CONCEPT#['query','concept','tag']\n end\n #debugger\n o.find_all{|k,v|k.to_s =~ 
/^facet\\_/}.each do |e|\n #debugger\n with (e[0].to_s.split('_')[1..-1].join('_')).to_sym, e[1] if [e[1]].flatten.first != '-1'\n end\n #debugger\n order_by(:basetime, :desc) if o[:order] == \"recency\" || query == TEXT_DUMMY\n paginate(:page => o[:page], :per_page => o[:per_page]) if o[:page]\n facet(o[:facet]) if o[:facet]\n without :hidden_flag, '1'\n end\n #debugger\n if o[:facet]\n result.facet(o[:facet]).rows\n elsif o[:raw]\n result\n else\n result_items = result.hits.map_with_index{|e,i|{:id=>e.instance.id, :rank=>(i+1), :score=>e.score}}\n @cv.add(:type=>'kwd', :query=>query, :created_at=>(o[:created_at] || Time.now), :history_id=>o[:history_id], :result=>result_items) if o[:add_context]\n result_items\n end\n end", "def searchByCity\n\t\turl = request.original_url\n\t\t\n\t\tbegin\n\n\t\t\tprms = CGI.parse(URI.parse(url).query)\n\n\n\t\t\tresults = Doctor.where(\"city LIKE ?\", \"%#{prms['city'][0]}%\")\n\n\t\t\trender json: results\n\t\trescue Exception => e\n\t\t\trender json: { errors: \"Some errors\" }, status: 422\n\t\tend\n\tend", "def search pattern\n\t\tresults = all.map {|key|\n\t\t\tkey if key.to_s =~ /#{pattern}/i\n\t\t}\n\t\tresults.delete nil\n\t\tresults\n\tend", "def search_issue(query)\n issues = load_and_cache_user_issues\n results = issues.select do |issue|\n issue['name'] =~ Regexp.new(query, 'i')\n end\n results += search_all_issues(query) if query =~ %r{\\/}\n results.uniq\n end", "def search_book(query)\n\n @book = Book.where(\"title like ?\" , query + '%')\n\n @book\n\n end" ]
[ "0.6834786", "0.6806134", "0.65217036", "0.62539136", "0.6223015", "0.6213546", "0.6148502", "0.6113235", "0.60944456", "0.6087862", "0.60752463", "0.60740215", "0.606002", "0.6004547", "0.5976069", "0.59660643", "0.59478706", "0.5946668", "0.59425825", "0.5923288", "0.59157515", "0.59066176", "0.5883347", "0.58711463", "0.58600044", "0.58582556", "0.58515066", "0.5835028", "0.5834981", "0.58328557", "0.5832233", "0.5814715", "0.58088934", "0.5800043", "0.57950187", "0.57867235", "0.5782141", "0.5773724", "0.57631636", "0.5753073", "0.57518923", "0.5743239", "0.57400084", "0.5738491", "0.5737987", "0.57278705", "0.57238305", "0.57231456", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.5717826", "0.57174975", "0.571363", "0.5699172", "0.56982535", "0.56963766", "0.5692855", "0.569252", "0.56871116", "0.5686465", "0.5686348", "0.56808347", "0.5678916", "0.5676583", "0.5661813", "0.56578535", "0.56546944", "0.565128", "0.564921", "0.56467223", "0.56435686", "0.56409204", "0.56353045", "0.5633287", "0.56326956", "0.56326956", "0.5628136", "0.56277716", "0.56261903", "0.56224006", "0.5616457", "0.5616177", "0.5614379", "0.5613842", "0.56018263", "0.56002754", "0.5599036", "0.55968857", "0.55939806", "0.5593097", "0.5591644", "0.55879474", "0.5583097" ]
0.7027388
0
Return input files based on the provided output files
def input_files @input_files ||= to_file(@args.input) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_files\n @output_files ||= to_file(@args.output)\n end", "def build_input_files\n @input_files.map { |file| build_file(file) }\n end", "def files_from_generator_output(output, type = 'create')\n output.to_a.map { |line| line.scan(/#{type}\\s+([^\\s]+)$/).flatten.first }.compact.select { |f| File.exist?(f) and !File.directory?(f) }\nend", "def output_files\n @output_files ||= Fileset.new()\n end", "def input_files\n ts = prerequisite_tasks.select { |t| t.is_a? Chore }.to_a\n @input_files ||= Fileset.new(ts)\n end", "def output_files\n @output_files ||= Fileset.new\n end", "def output_files\n @output_files ||= Fileset.new\n end", "def _output_paths(file)\n input_file_dir = File.dirname(file)\n file_name = _output_filename(file)\n file_name = \"#{file_name}.erb\" if _append_html_ext_to_output_path?(file_name)\n input_file_dir = input_file_dir.gsub(Regexp.new(\"#{options[:input]}(\\/){0,1}\"), '') if options[:input]\n\n if options[:output]\n Array(options[:output]).map do |output_dir|\n File.join(output_dir, input_file_dir, file_name)\n end\n else\n if input_file_dir == ''\n [file_name]\n else\n [File.join(input_file_dir, file_name)]\n end\n end\n end", "def output_files\n get_info :output_files\n end", "def gen_input_files(fileprefix, nbr_files, nbr_floors, rate, max_time)\r\n nbr_files.times do |n|\r\n filename = fileprefix + (\"%.3d\" % n) + '.in'\r\n gen_input_file(filename, nbr_floors, rate, max_time)\r\n end\r\n end", "def files(dir = nil)\n return Fileset.from_glob(dir) unless dir.nil?\n input_files\n end", "def target_files\n files.map {|f| target_file f}\n end", "def set_files(input = \"./sample1.txt\",output = './dest1.txt')\n @input_file = input\n @output_file = output\n @extract = load_file(@input_file,PATTERNS)\n end", "def generate_downstream_files(search_engines, list_of_files, overwrite=false)\n wanted = search_engines.map do |se|\n se.search_input_extension\n end\n wanted.each do |ext|\n #####\n end\n end", "def files\n @files ||= lambda {\n sorted_relevant_files = []\n\n file_globs.each do |glob|\n current_glob_files = Pathname.glob(glob)\n relevant_glob_files = relevant_files & current_glob_files\n\n relevant_glob_files.map! 
do |file|\n File.new(path: file,\n namespaces: namespaces,\n decryption_keys: decryption_keys,\n encryption_keys: encryption_keys,\n signature_name: signature_name)\n end\n\n sorted_relevant_files += relevant_glob_files\n end\n\n sorted_relevant_files.uniq\n }.call\n end", "def extract_filenames(source, filepath, filelist)\n case source.class.to_s\n when 'String'\n filelist << filepath + source\n filepath = ''\n when 'Array'\n source.each do |item|\n extract_filenames(item, filepath, filelist)\n end\n when 'Hash'\n source.each do |key, value|\n filepath << key + '/'\n extract_filenames(value, filepath, filelist)\n end\n end\n filelist\n end", "def all_filenames\n\n\n # This checks for it being an array and not nil!\n # return @filenames if @filenames && [email protected]?\n\n # This means we can add files to the output\n return $filenames if $filenames && $filenames.size > 5 # I guess that small numbers are errors too\n \n if @directory\n @output_directory ||= File.join(@directory, 'Build')\n $filenames = Dir.glob(File.join(@directory, \"**/*\")).map {|file|\n next if file.start_with?(@output_directory)\n next if File.directory?(file)\n file.gsub(@directory+\"/\", \"\")\n }.compact\n else\n []\n end\n end", "def group_fastq_files starting_path, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n execute \"mkdir -p #{output_path}\"\n fastq_groups = []\n \n fastq_files = Dir.glob(File.join(starting_path, fastq_search_path))\n if fastq_files.empty?\n log \"# ERROR: no fastq files found in #{starting_path}\" if fastq_files.empty?\n else\n log \"# #{fastq_files.size} fastq files found in #{starting_path}\"\n fastq_file_data = get_file_data fastq_files, \"\\.fastq\\.gz\"\n fastq_groups = group_files fastq_file_data, output_path, options\n end\n fastq_groups\n end", "def process\n outs = []\n @infiles.each do |f|\n if f.instance_of? Hash\n dat = f[:block].(f[:file])\n if dat.instance_of? String\n out << dat + \"\\n\\n\"\n else\n fail TypeError, \"#{dat.class} returned, String expected\"\n end\n elsif f.instance_of? String\n outs << File.read(f)\n elsif f.instance_of? Array\n outs << f.map { |fn| File.read(fn) }\n else\n fail TypeError, \"Infile #{f.inspect} is not a hash nor a string nor an array!\"\n end\n end\n out = outs.join(\"\\n\\n\")\n if @block\n out = @block.(out)\n end\n if @outfile\n File.delete(@outfile) if File.exist? @outfile\n File.open(@outfile, 'w') do |f|\n f.write(out)\n end\n end\n out\n end", "def init_files(input, output)\n can_read?(input)\n\n @output_file = prepare_write_file(output)\n @error_file = prepare_write_file(ERROR_FILE)\n\n @output_file << LineParser::RESULT_HEADER.to_csv\n @error_file << LineParser::ERROR_HEADER.to_csv\n end", "def files\n ext_files = mapper.extracted_files || []\n ext_files + [mapper.zip.name.to_s]\n end", "def resource_output_paths(resource_input_paths)\n resource_input_paths.map do |resource_input_path|\n base_path = '${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}'\n extname = File.extname(resource_input_path)\n basename = extname == '.xcassets' ? 
'Assets' : File.basename(resource_input_path)\n output_extension = Target.output_extension_for_resource(extname)\n File.join(base_path, File.basename(basename, extname) + output_extension)\n end.uniq\n end", "def generate_fastq\n\n # Generate FASTQ file list, expanding patterns if found.\n fastq_input_file_list = []\n fastq_output_prefix_list = []\n fastq_output_group_list = []\n ARGV.each do |fastq_input_file|\n if fastq_input_file =~ /[\\+\\?\\*]/\n # File is regexp: use it to do our own \"glob\".\n # If the regexp has at least one group in it, save the group match\n # in a corresponding list to use in making the output files.\n fastq_input_dir = File.dirname(fastq_input_file)\n fastq_input_patt = File.basename(fastq_input_file)\n\n Dir.entries(fastq_input_dir).sort().each do |entry|\n if entry =~ /#{fastq_input_patt}()/o\n fastq_input_file_list << entry\n if not @out_prefix.nil?\n fastq_output_prefix_list << @out_prefix\n else\n fastq_output_prefix_list << entry[0..Regexp.last_match.begin(1)-1-1] # Second -1 is for underline.\n end\n fastq_output_group_list << $1\n end\n end\n else\n if File.file? fastq_input_file\n fastq_input_file_list << fastq_input_file\n fastq_output_prefix_list << @out_prefix\n end\n end\n end\n\n die \"no FASTQ files found\" if fastq_input_file_list.length == 0\n\n STDERR.puts(\"Input files: #{fastq_input_file_list}\") if @verbose\n\n fastq_list = fastq_input_file_list.zip(fastq_output_prefix_list, fastq_output_group_list)\n fastq_list.each do |fastq_input_file, fastq_output_prefix, fastq_output_group|\n\n # If we are splitting to subfiles, reset the output sub filenames to\n # the new destination for the new input file; also reset statistics.\n if @save_subfiles\n if fastq_output_group == \"\"\n fastq_output_group_mod = fastq_output_group\n else\n fastq_output_group_mod = \"_#{fastq_output_group}\"\n end\n @pass_sub_filename = File.join(@pass_dir, \"#{fastq_output_prefix}_pf#{fastq_output_group_mod}.fastq\")\n @pass_sub_filename += \".gz\" if @compress\n @reject_sub_filename = File.join(@reject_dir, \"#{fastq_output_prefix}_reject#{fastq_output_group_mod}.fastq\")\n @reject_sub_filename += \".gz\" if @compress\n\n @stats_sub_filename = File.join(@stats_dir, \"#{fastq_output_prefix}_seq_stats#{fastq_output_group_mod}.txt\")\n @pass_sub_read_cnt = @reject_sub_read_cnt = @total_sub_read_cnt = 0\n end\n\n if @save_subfiles\n open_fastq_sub_output_files\n end\n\n # split one FASTQ file into post-filter and reject FASTQ\n STDERR.puts \"Processing #{fastq_input_file}...\" if @verbose\n fastq_input_fp = open_fastq_input(fastq_input_file)\n if fastq_input_fp.nil?\n warn \"#{fastq_input_file} is empty...skipping\"\n next\n end\n begin\n while fastq_input_fp.readline\n header_line = $_\n if header_line !~ /^@/\n STDERR.puts \"Missing header line (#{header_line})...exiting\"\n exit(-1)\n end\n\n header_fields = header_line.split(/[ _]/)\n die \"header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\"!\")}]\" if header_fields.size != 2\n\n sub_header_fields = header_fields[1].split(\":\",-1)\n die \"sub header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\":\")}(#{sub_header_fields.join(\":\")})]\" if sub_header_fields.size != 4\n\n @total_read_cnt += 1\n @total_sub_read_cnt += 1\n\n if sub_header_fields[1] == \"N\"\n out = @pass\n @pass_read_cnt += 1\n out_sub = @pass_sub\n @pass_sub_read_cnt += 1\n elsif sub_header_fields[1] == \"Y\"\n out = @reject\n @reject_read_cnt += 1\n out_sub = @reject_sub\n 
@reject_sub_read_cnt += 1\n else\n die \"filter field value error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER}...skipping read\"\n out = nil\n end\n\n # Read the rest of the sequence.\n seq_line = fastq_input_fp.readline\n plus_line = fastq_input_fp.readline\n if plus_line !~ /^\\+/\n STDERR.puts \"Malformed FASTQ +line (#{plus_line})\"\n end\n qual_line = fastq_input_fp.readline\n\n # Output the sequence to whatever file was chosen above.\n if !out.nil?\n if not @remove_spaces\n out.print \"#{header_line}\"\n out_sub.print \"#{header_line}\" if not out_sub.nil?\n else\n out.puts header_fields.join(\"_\")\n out_sub.puts header_fields.join(\"_\") if not out_sub.nil?\n end\n out.print \"#{seq_line}\"\n out.print \"#{plus_line}\"\n out.print \"#{qual_line}\"\n if not out_sub.nil?\n out_sub.print \"#{seq_line}\"\n out_sub.print \"#{plus_line}\"\n out_sub.print \"#{qual_line}\"\n end\n end\n end # while\n\n rescue EOFError\n\n end\n\n fastq_input_fp.close()\n\n if @save_subfiles\n close_fastq_sub_output_files\n store_stats @stats_sub_filename, @pass_sub_read_cnt, @reject_sub_read_cnt, @total_sub_read_cnt\n end\n\n end # fastq_list.each\n end", "def autogradeInputFiles(assessmentDir)\n # Absolute path names on the local autolab server of the input\n # autograding input files: 1) The student's handin file, 2)\n # The makefile that runs the process, 3) The tarfile with all\n # of files needed by the autograder. Can be overridden in the\n # lab config file.\n localHandin = File.join(assessmentDir, @assessment.handin_directory, \n @submission.filename)\n localMakefile = File.join(assessmentDir, \"autograde-Makefile\")\n localAutograde = File.join(assessmentDir, \"autograde.tar\")\n\n # Name of the handin file on the destination machine\n destHandin = @assessment.handin_filename\n\n # Construct the array of input files.\n handin = {\"localFile\" => localHandin, \"destFile\" => destHandin}\n makefile = {\"localFile\" => localMakefile, \"destFile\" => \"Makefile\"}\n autograde = {\"localFile\" => localAutograde, \"destFile\" => \"autograde.tar\"}\n\n return [handin, makefile, autograde]\n end", "def files\n file_sets.map{|fs| fs.files }.flatten\n end", "def copyOutputFiles(fromDir, filePattern, outDir)\n\tFileUtils.mkdir_p outDir unless exists?(outDir)\n\tDir.glob(File.join(fromDir, filePattern)){|file|\n\t\tcopy(file, outDir) if File.file?(file)\n\t}\nend", "def takeFilesNames\nDir['result*.*'].each do |file_name|\n @files_names.push(file_name)\nend\nend", "def getFiles(theArgs)\n\n\ttheFiles = [];\n\tpathsExclude = theArgs[:exclude];\n\n\ttheArgs[:paths].each do |pathRoot|\n\t\n\t\tif (File.exist?(pathRoot))\n\t\t\tFind.find(pathRoot) do |thePath|\n\t\t\t\tif (File.file?(thePath))\n\n\t\t\t\t\tif (!pathsExclude.include?(thePath))\n\t\t\t\t\t\tif (!FILES_EXCLUDE.include?(File.basename(thePath)))\n\t\t\t\t\t\t\ttheFiles << thePath;\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\n\t\t\t\tend\n\t\t\tend\n\t\telse\n\t\t\tputs \"Skipping #{pathRoot}, file not found\";\n\t\tend\n\tend\n\n\treturn theFiles;\n\nend", "def files_from_sources(sources)\n ext_glob = Array(self.class.source_ext).join(',')\n sources.flatten.map { |source| File.directory?(source) ? 
FileList[\"#{source}/**/*.{#{ext_glob}}\"] : source }.\n flatten.reject { |file| File.directory?(file) }.map { |file| File.expand_path(file) }.uniq\n end", "def set_input_output_paths(phase, input_paths_by_config, output_paths_by_config)\n if input_output_paths_use_filelist?(phase)\n [input_paths_by_config, output_paths_by_config].each do |hash|\n hash.each do |file_list, files|\n generator = Generator::FileList.new(files)\n Xcode::PodsProjectGenerator::TargetInstallerHelper.update_changed_file(generator, file_list.file_list_path)\n end\n end\n\n phase.input_paths = nil\n phase.output_paths = nil\n phase.input_file_list_paths = input_paths_by_config.each_key.map(&:file_list_relative_path).uniq\n phase.output_file_list_paths = output_paths_by_config.each_key.map(&:file_list_relative_path).uniq\n else\n input_paths = input_paths_by_config.values.flatten(1).uniq\n output_paths = output_paths_by_config.values.flatten(1).uniq\n TargetIntegrator.validate_input_output_path_limit(input_paths, output_paths)\n\n phase.input_paths = input_paths\n phase.output_paths = output_paths\n phase.input_file_list_paths = nil\n phase.output_file_list_paths = nil\n end\n end", "def list_scan_files(options = {})\n subs = all_subjects\n filt_subs = looked_for(options[:subjects])\n filt_sess = looked_for(options[:sessions])\n filt_types = looked_for(options[:scan_type] || \"fMRI\") \n scans_list = all_scans(subs, filt_subs, filt_sess, filt_types) \n results = []\n scans_list.each do |scan_dir|\n resources_path = \"#{scan_dir}/Resources\"\n scanlist_type = list_files(\"#{resources_path}\", :directory).map { |e| Pathname.new(e.name).basename.to_s }\n scanlist_type.each do |scan_type|\n scanlist = list_files(\"#{resources_path}/#{scan_type}/Files\", :regular).map { |e| Pathname.new(e.name).basename.to_s }\n if (! options[:ext].blank?) 
&& options[:ext].is_a?(Regexp)\n scanlist = scanlist.select { |scan| scan.match(options[:ext]) }\n end\n scanlist.each { |scan| results << \"#{resources_path}/#{scan_type}/Files/#{scan}\" }\n end \n end\n results\n end", "def files\n result = []\n @my_files.each do |f|\n result << f.fname if FileTest.file?(f.fname)\n end\n result\n end", "def retrieve_files_in_main_dir\n ensure_file_open!\n @file.glob('*').map do |entry|\n next if entry.directory?\n\n entry_file_name = Pathname.new(entry.name)\n [entry_file_name, entry.get_input_stream(&:read)]\n end.compact.to_h\n end", "def files_filtering files\n return files unless @file_regexp\n f = files.select do |file|\n test_name_by_date file\n end\n f\n end", "def cat_files file_groups\n file_groups.each do |group|\n check_exists(group[:paths])\n # this is the Illumina recommended approach to combining these fastq files.\n # See the Casava 1.8 Users Guide for proof\n files_list = group[:paths].join(\" \")\n command = \"cat #{files_list} > #{group[:path]}\"\n execute command\n end\n end", "def execute\n settings = read_settings\n input, remove_regexp, output = get_in_out_removes(settings)\n create_output_dir(output)\n Dir.glob(input).each do |file|\n input_src = get_input(file)\n replaced = remove_head(input_src.encode('UTF-16BE', 'UTF-8', invalid: :replace, undef: :replace, replace: '?').encode('UTF-8'), remove_regexp)\n output_file(file, replaced, output)\n end\n end", "def 500_files(input)\n # naive solution is to flatten and sort\n\n \nend", "def ret_indexed_input_files(input_type)\n input_type_hash(input_type)[:input_files].inject({}) { |h, (k, v) | h.merge(k => input_files_class.new(v[:regexps])) }\n end", "def compile_file_list(output_folder, files, data = {})\n files.each do |target, source|\n Google::LOGGER.debug \"Compiling #{source} => #{target}\"\n target_file = File.join(output_folder, target)\n .gsub('{{product_name}}', @api.prefix[1..-1])\n\n manifest = @config.respond_to?(:manifest) ? 
@config.manifest : {}\n generate_file(\n data.clone.merge(\n name: target,\n product: @api,\n object: {},\n config: {},\n scopes: @api.scopes,\n manifest: manifest,\n tests: '',\n template: source,\n generated_files: @generated,\n sourced_files: @sourced,\n compiler: compiler,\n output_folder: output_folder,\n out_file: target_file,\n prop_ns_dir: @api.prefix[1..-1].downcase,\n product_ns: @api.prefix[1..-1].camelize(:upper)\n )\n )\n\n %x(goimports -w #{target_file}) if File.extname(target_file) == '.go'\n end\n end", "def setup_outputs_for(input_file_path)\n file_name_without_extension = File.basename(input_file_path, '.*')\n outputs = (exporters || Tracksperanto.exporters).map do | exporter_class |\n export_name = [file_name_without_extension, exporter_class.desc_and_extension].join(\"_\")\n export_path = File.join(File.dirname(input_file_path), export_name)\n exporter_class.new(open_owned_export_file(export_path))\n end\n \n Tracksperanto::Export::Mux.new(outputs)\n end", "def scan\n results = []\n dirs.each do |dir|\n files_in_dir = Dir.glob(File.join(dir,'**','*'))\n results.concat(files_in_dir)\n end\n @known_files = results\n end", "def read_files\n Dir['*', '*/*'].group_by { |f| f.ext || :_dir }.to_symkey\n end", "def dependent_files\n processed.map(&:abs_path).compact.select { |fn| File.exist?(fn) }\n end", "def sort_out_output_directories \n FileUtils.mkdir_p(output_directory)\n FileUtils.mkdir_p(xml_directory)\n FileUtils.mkdir_p(intermediate_directory) unless run_in_memory\n end", "def test_files\n files = tests\n files = files.map{ |f| Dir[f] }.flatten\n files = files.map{ |f| File.directory?(f) ? Dir[File.join(f, '**/*.rb')] : f }\n files = files.flatten.uniq\n files = files.map{ |f| File.expand_path(f) }\n files\n end", "def process_files files=[]\n files.each do |file|\n process_file file\n end\n end", "def get_sample_names(substring)\n inputs = Dir.glob(\"inputs/#{$jobid}/*.txt\").select {|x| x.include? substring}\n inputs = inputs.map{|x| File.basename(x).sub(\".txt\", \"\")}\n #puts substring\n #puts inputs\n #puts $jobid\n return inputs\nend", "def files(args = {})\n opts = {\n base_dir: Dir.pwd,\n recursive: false,\n exts: [],\n non_exts: []\n }.merge(args)\n\n base_dir = opts[:base_dir]\n fail CustomError, \"The directory #{base_dir} is not valid or or not readable!\" unless File.exist?(base_dir)\n\n wildcard = opts[:recursive] ? '**' : ''\n exts = opts[:exts]\n non_exts = opts[:non_exts]\n\n file_with_extension = Dir.glob(File.join(base_dir, wildcard, \"*.{#{exts.join(',')}}\"))\n file_with_no_extension = no_extension_files(base_dir, wildcard, non_exts)\n\n (file_with_extension + file_with_no_extension).sort\n end", "def single_ucf_file_lists\n File.open(single_bad_ucf_file, 'a') do |mergedfile|\n Dir.glob(\"#{output_directory_path}*name.txt\").each do |file|\n File.foreach(file) do |line|\n mergedfile.write(line)\n end\n end\n end\n end", "def group_files file_data, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n\t\t\t\t# alternatively inherit the parent class and call super???? \n\t\t\t\t# super \n\t\t\t\t# \t\n groups = {}\n file_data.each do |data|\n if data[:barcode] == \"Undetermined\" and options[:exclude_undetermined]\n log \"# Undetermined sample lane: #{data[:lane]} - name: #{data[:sample_name]}. Skipping\"\n next\n end\n \n group_key = name_for_data data, options\n \n if groups.include? 
group_key\n if groups[group_key][:sample_name] != data[:sample_name]\n raise \"ERROR: sample names not matching #{group_key} - #{data[:path]}:#{data[:sample_name]}vs#{groups[group_key][:sample_name]}\"\n end\n if groups[group_key][:lane] != data[:lane]\n raise \"ERROR: lanes not matching #{group_key} - #{data[:path]}\"\n end\n groups[group_key][:files] << data\n else\n group_path = File.join(output_path, group_key)\n groups[group_key] = {:group_name => group_key,\n :path => group_path,\n :sample_name => data[:sample_name],\n :read => data[:read],\n :lane => data[:lane],\n :files => [data]\n }\n end\n end\n \n # sort based on read set\n groups.each do |key, group|\n group[:files] = group[:files].sort {|x,y| x[:set] <=> y[:set]}\n group[:paths] = group[:files].collect {|data| data[:path]}\n end\n groups.values\n end", "def imagepipeline\n # delete non-image files\n images = Dir.entries(@from_dir).delete_if do |file|\n (file =~ /\\w+\\.(jpg|jpeg)/) == nil\n end\n images.each do |file|\n yield File.join(@from_dir,file), File.join(@to_dir,file)\n end\n end", "def get_files (path, files_found) \n\t\tif File.directory? path\n\t\t\tDir.foreach path do |file| \n\t\t\t\tif (!EXCLUDED_FILES.include? file)\n\t\t\t\t\tget_files(path+file, files_found) \n\t\t\t\tend\n\t\t\tend\n\t\telsif File.file? path\n\t\t\tfiles_found << path\n\t\tend\n\tend", "def read_out_files(fh,number_files, unpack_35, dup_refs_gt_0)\n out_files = Array.new(number_files)\n header.num_dta_files.times do |i|\n out_files[i] = Mspire::Sequest::Srf::Out.from_io(fh, unpack_35, dup_refs_gt_0)\n end\n out_files\n end", "def build_file_sets\n return if file_nodes.empty? || processing_derivatives?\n file_nodes.each_with_index.map do |node, index|\n file_set = create_file_set(node, files[index])\n file_set\n end\n end", "def autogradeInputFiles(ass_dir, assessment, submission)\n # Absolute path names on the local autolab server of the input\n # autograding input files: 1) The student's handin file, 2)\n # The makefile that runs the process, 3) The tarfile with all\n # of files needed by the autograder. Can be overridden in the\n # lab config file.\n local_handin = File.join(ass_dir, assessment.handin_directory, submission.filename)\n local_makefile = File.join(ass_dir, \"autograde-Makefile\")\n local_autograde = File.join(ass_dir, \"autograde.tar\")\n local_settings_config = File.join(ass_dir, assessment.handin_directory, submission.filename + \".settings.json\")\n\n # Name of the handin file on the destination machine\n dest_handin = assessment.handin_filename\n\n # Construct the array of input files.\n handin = { \"localFile\" => local_handin, \"destFile\" => dest_handin }\n makefile = { \"localFile\" => local_makefile, \"destFile\" => \"Makefile\" }\n autograde = { \"localFile\" => local_autograde, \"destFile\" => \"autograde.tar\" }\n settings_config = { \"localFile\" => local_settings_config, \"destFile\" => \"settings.json\" }\n\n [handin, makefile, autograde, settings_config]\n \n end", "def files_for_rotation\n files = Set.new\n Padrino.dependency_paths.each do |path|\n files += Dir.glob(path)\n end\n reloadable_apps.each do |app|\n files << app.app_file\n files += Dir.glob(app.app_obj.prerequisites)\n files += app.app_obj.dependencies\n end\n files + special_files\n end", "def outputFN( input )\n # First: if we are in a temp folder put it where the script is... \n # otherwise we drop it in the temp folder. 
This only happens with OCRA.\n tmp = /\\W(temp|tmp)\\W/i\n inreg = /\\Win(put)?$/i\n\n if File.dirname( input ) =~ inreg\n File.expand_path( File.join( File.dirname( input ), \"..\", \"out\" , File.basename( input, \".py\" ) ) )\n elsif tmp =~ File.dirname( __FILE__ )\n if tmp =~ File.dirname( input )\n \"\" # they can choose a directory manually\n else\n File.join File.dirname( input ), File.basename( input, \".py\" )\n end\n else\n File.join File.dirname( __FILE__ ), \"out\", File.basename( input, \".py\" ) \n end\nend", "def split_data_into_files(datafile)\n\n datafiles = []\n output = NIL\n File.open(Rails.root.join(datafile)) do |file| \n counter = 0\n something_was_written = FALSE\n while line = file.gets \n # parse lines and break into different files at #\n if( line.match( /^\\s*\\#+/ ) )\n if (something_was_written && output) \n output.close\n output = NIL\n end\n something_was_written = FALSE\n else \n if (!something_was_written) \n outputfile_name = datafile.gsub(/input/,\"input\" +\n counter.to_s)\n counter +=1\n output = File.open(Rails.root.join(outputfile_name), \"w\") \n datafiles.push((Rails.root.join(outputfile_name)).to_s)\n #datafiles.push( \"../\" + outputfile_name)\n #datafiles.push(Dir.getwd + \"/\" + outputfile_name)\n end\n # check if line matches @n_nodes digits\n nodes_minus_one = (@job.nodes - 1).to_s\n if (line.match( /^\\s*(\\.?\\d+\\.?\\d*\\s+){#{nodes_minus_one}}\\.?\\d+\\.?\\d*\\s*$/ ) ) \n output.puts line\n logger.info \"write line\" + line\n something_was_written = TRUE\n else\n @error_message = \"The data you entered is invalid. This :#{line.chop!}: is not a correct line.\"\n logger.warn \"Error: Input data not correct. This :#{line}: is not a correct line.\"\n return NIL\n end\n end\n end \n file.close\n if (output) \n output.close\n end\n end\n return datafiles\n end", "def split_input filename, pieces\n input = {}\n name = nil\n seq=\"\"\n sequences=0\n output_files=[]\n if pieces > 1\n File.open(filename).each_line do |line|\n if line =~ /^>(.*)$/\n sequences+=1\n if name\n input[name]=seq\n seq=\"\"\n end\n name = $1\n else\n seq << line.chomp\n end\n end\n input[name]=seq\n # construct list of output file handles\n outputs=[]\n pieces = [pieces, sequences].min\n pieces.times do |n|\n outfile = \"#{filename}_chunk_#{n}.fasta\"\n outfile = File.expand_path(outfile)\n outputs[n] = File.open(\"#{outfile}\", \"w\")\n output_files[n] = \"#{outfile}\"\n end\n # write sequences\n count=0\n input.each_pair do |name, seq|\n outputs[count].write(\">#{name}\\n\")\n outputs[count].write(\"#{seq}\\n\")\n count += 1\n count %= pieces\n end\n outputs.each do |out|\n out.close\n end\n else\n output_files << filename\n end\n output_files\n end", "def concat\n content = \"\"\n files = []\n @opts[:files].each do |file|\n files += if single? 
file\n [\"#{@opts[:input_dir]}/#{file}.#{@opts[:type]}\"]\n else\n expand file\n end\n end\n files.each do |file|\n content << File.read(file)\n content << \"\\n\"\n end\n\n if @opts[:outputs]\n @opts[:outputs].each do |name|\n output = \"#{name}.#{@opts[:type]}\"\n local = content\n local = @opts[:filter].call(output, local) if @opts[:filter]\n File.open(output, \"w\"){ |f| f.write local.strip }\n UI.info \"Concatenated #{output}\"\n end\n else\n content = @opts[:filter].call(@output, content) if @opts[:filter]\n File.open(@output, \"w\"){ |f| f.write content.strip }\n UI.info \"Concatenated #{@output}\"\n end\n end", "def copy_files(output_folder)\n copy_file_list(output_folder, @config.files.copy)\n end", "def create_output_files\n return unless @option_output_path\n return if @collected_nodes.empty?\n @collected_nodes.each do |certname, properties|\n next if properties['settings'].empty?\n output_file = \"#{@option_output_path}/nodes/#{certname}.yaml\"\n File.write(output_file, properties['settings'].to_yaml)\n output(\"## Wrote Hiera YAML file: #{output_file}\\n\\n\")\n end\n return if @common_settings.empty?\n output_file = \"#{@option_output_path}/common.yaml\"\n File.write(output_file, @common_settings.to_yaml)\n end", "def fetch_archives(output = render_template)\n return [] if output.blank?\n filename, path = create_tempfile(output)\n archive = archive_files(File.dirname(path), filename, [expanded_path(path)])\n remove_tempfile(path)\n split_archive(archive, \"part_#{filename}\", 1024 * 1024 * 100) # 104_857_600\n end", "def for_files(*wildcards)\n wildcards.each do |wildcard|\n Dir[wildcard].each do |fn|\n yield(fn)\n end\n end\n end", "def for_files(*wildcards)\n wildcards.each do |wildcard|\n Dir[wildcard].each do |fn|\n yield(fn)\n end\n end\n end", "def filtered(files); end", "def list_anat_files(options = {}) \n subs = all_subjects\n filt_subs = looked_for(options[:subjects])\n filt_sess = looked_for(options[:sessions])\n filt_types = looked_for(options[:t_types] || [ \"T1\", \"T2\" ]) \n scans_list = all_scans(subs, filt_subs, filt_sess, filt_types) \n results = []\n scans_list.each do |scan_dir|\n resources_path = \"#{scan_dir}/Resources\"\n scanlist_type = list_files(\"#{resources_path}\", :directory).map { |e| Pathname.new(e.name).basename.to_s }\n scanlist_type.each do |scan_type|\n scanlist = list_files(\"#{resources_path}/#{scan_type}/Files\", :regular).map { |e| Pathname.new(e.name).basename.to_s }\n if (! options[:ext].blank?) 
&& options[:ext].is_a?(Regexp)\n scanlist = scanlist.select { |scan| scan.match(options[:ext]) }\n end\n scanlist.each { |scan| results << \"#{resources_path}/#{scan_type}/Files/#{scan}\" }\n end \n end\n results\n end", "def extract_all\n files.each do |file|\n extract file\n end\n end", "def files\n files_in_path.map do |file|\n TemplateFile.from_full_path(@path, file) unless File.directory?(file)\n end.compact\n end", "def category_result_files\n crs = self.data_directory.find_all { |name| name =~ /^multi_.*\\.htm\\.yml$/ }\n crs.collect { |fn| File.join(self.data_directory.path, fn) }\n end", "def merge_data(input_files)\n for filename in input_files\n merge_into_hash(filename)\n end\n sanitize_hash\n end", "def sequential_files\n get_files_in_dir(@sequential_dir)\n end", "def read(files); end", "def read(files); end", "def source_files(target_name, &filter)\n target = target(target_name)\n\n source_files = target.source_build_phase.files\n source_files = source_files.select(&filter) unless filter.nil?\n\n source_paths = source_files.map { |pathname|\n relative_path(pathname)\n }\n\n source_paths\n end", "def input_files_pattern\n @args.options[:input_files_pattern]\n end", "def build_file_list\n puts_and_logs 'Finding files...'\n file_list = []\n config[:source].each do |entry|\n if File.directory?(entry)\n populate_list_of_files_from_directory(file_list, entry) \n next\n end\n if File.file?(entry)\n populate_list_of_files_from_file(file_list, entry) \n next\n end\n logger.warn \"\\\"#{entry}\\\" is neither a directory nor a regular file. Ignored...\"\n end\n logger.debug(file_list)\n file_list\n end", "def filesets_for_export(include_files = true)\n filesets = []\n has_ereader_files = false\n # get file-level filesets (image, document, video, etc); remove ereader (do 'em separately)\n all_files = Bplmodels::Finder.getFiles(pid)\n\n ereader_files = all_files.delete(:ereader) || []\n\n ## all_files.delete(:images) # uncomment for easier testing of IA objects\n # Make all non ereader files part of a \"lazy\" eumerator see Enumerator::Lazy at https://ruby-doc.org/core-2.6.8/Enumerator/Lazy.html\n # Note all_files.values.reduce(:+) will flatten the values in the all_files hash into a single array\n filesets = filesets_for_files_lazy(all_files.values.reduce(:+), include_files)\n # get EReader filesets and combine, make EPub the 'primary'\n if ereader_files.present?\n has_ereader_files = true\n ereader_fileset_for_export = nil\n if include_files\n ereader_filesets = filesets_for_files(ereader_files, include_files)\n ereader_filesets.each_with_index do |er_fileset, index|\n if er_fileset[:file_set][:files][0][:content_type] == 'application/epub+zip'\n ereader_fileset_for_export = er_fileset\n ereader_filesets.delete_at(index)\n end\n end\n ereader_filesets.each do |er_fileset|\n er_fileset[:file_set][:files].each do |er_file|\n next unless er_file[:file_type].match?(/ebook_access/)\n\n ereader_fileset_for_export[:file_set][:files] << er_file\n end\n end\n else\n ereader_files = ereader_files.select { |erf| erf[\"mime_type_tesim\"].include?(\"application/epub+zip\") }\n if ereader_files.present?\n ereader_fileset_obj = Bplmodels::File.find(ereader_files.first['id'])\n ereader_fileset_for_export = ereader_fileset_obj.export_data_for_curator_api(include_files)\n end\n end\n\n # have to modify keys of ebook_access_mobi and ebook_access_daisy files to use epub pid\n # NOTE on moving this up from below\n # Instead of reiterating through all of the fileset objects to check for the ereader object we 
just added...\n # modify the one we are adding before putting it into the filesets enum\n if ereader_fileset_for_export.present?\n pid_for_key = ereader_fileset_for_export.dig(:file_set, :ark_id)\n ereader_fileset_for_export[:file_set][:files].each do |file|\n if file[:file_type] == 'ebook_access_daisy' || file[:file_type] == 'ebook_access_mobi'\n key_parts = file[:key].split('/')\n key_parts[1] = pid_for_key if key_parts[1].match?(/[\\w-]*:[0-9a-z]*/)\n file[:key] = key_parts.join('/')\n end\n end\n end\n # NOTE since filesets is a Enumerator::Lazy object the << operator does not work anymore\n # Instead you have to wrap the object into a Enumerator subtype(Array in this case) and add(+) it\n # This will create an Enumerator::Chain object\n filesets = filesets + Array.wrap(ereader_fileset_for_export)\n end\n\n # if has_ereader_files\n # filesets.each do |fs|\n # fileset = fs[:file_set]\n # next unless fileset[:file_set_type] == 'ereader'\n #\n # pid_for_key = fileset[:ark_id]\n # fileset[:files].each do |file|\n # if file[:file_type] == 'ebook_access_daisy' || file[:file_type] == 'ebook_access_mobi'\n # key_parts = file[:key].split('/')\n # key_parts[1] = pid_for_key if key_parts[1].match?(/[\\w-]*:[0-9a-z]*/)\n # file[:key] = key_parts.join('/')\n # end\n # end\n # end\n # end\n # get the object-level filesets (metadata, plainText, etc)\n object_filesets = object_filesets_for_export(object_filestreams_for_export)\n filesets + object_filesets\n end", "def all_matching_files\n @all_matching_files ||= find_files\n end", "def all_matching_files\n @all_matching_files ||= find_files\n end", "def get_associated_files(source_file_or_package)\n if package = pool.packages.detect {|pkg| pkg.name == source_file_or_package}\n pool.compile_package(package).to_a\n elsif source_file = pool.lookup(source_file_or_package)\n pool.lookup_dependencies(source_file).to_a << source_file\n else\n # Try using arg as mask\n mask = source_file_or_package.to_s\n if !(mask =~ /^\\s*$/) && !(source_files = pool.provides_tree.glob(mask).compact).empty?\n source_files.map {|source| get_associated_files(source) }.flatten\n else\n # No dice\n []\n end\n end\n end", "def files_list(path_list: '', name_list: '')\n # Check\n path_list = [path_list] if path_list.class == String # transform String in Array\n name_list = [name_list] if name_list.class == String # transform String in Array\n path_list.uniq! # Remove duplicate\n name_list.uniq! # Remove duplicate\n (@log.fatal {'FileManager.files_list() : no path given'}; exit ) if :path_list.size == 0\n (@log.fatal {'FileManager.files_list() : no file list given'}; exit ) if :name_list.size == 0\n # Init\n file_array = []\n # Create the list\n for path in path_list\n for file in name_list\n files = Dir.glob(path + '/' + file)\n file_array = file_array + files\n @log.info {\" FileManager search for \\\"#{path}/#{file}\\\" => found : #{Dir.glob(path + '/' + file).size} file(s)\"}\n end\n end\n # Something wrong ?\n if file_array.size == 0\n @log.fatal {\"FileManager : no Input Files found by files_list().path_list : #{path_list.to_s} name_list #{name_list.to_s}\"}; exit\n else\n return file_array\n end\n end", "def required_files(root = nil)\n sort!\n files = sources.map {|s| s.required_files }.flatten\n if root\n root = Pathname.new(File.expand_path(root))\n files = files.map {|f| Pathname.new(File.expand_path(f)).relative_path_from(root).to_s }\n end\n files\n end", "def open_fastq_sub_output_files\n return if @stats_only\n if @pass_sub_filename.nil? 
&& @reject_sub_filename.nil?\n # no files open\n @pass_sub = nil\n @reject_sub = nil\n else\n # split the reads that passed and the reads that failed\n # the quality filter into separate files; if either one\n # of the file names was not specified then discard the\n # corresponding reads\n unless @pass_sub_filename.nil?\n STDERR.puts \"Opening #{make_temp_filename(@pass_sub_filename)} for subfile pass filter FASTQ output.\" if @verbose\n @pass_sub = open_fastq_output(make_temp_filename(@pass_sub_filename))\n end\n unless @reject_sub_filename.nil?\n STDERR.puts \"Opening #{make_temp_filename(@reject_sub_filename)} for subfile reject filter FASTQ output.\" if @verbose\n @reject_sub = open_fastq_output(make_temp_filename(@reject_sub_filename))\n end\n end\n end", "def files() = files_path.glob('**/*')", "def input_files=(files)\n @input_files = files.map do |file|\n file.with_encoding(encoding)\n end\n end", "def build_file_sets\n return [] if file_nodes.empty?\n file_nodes.each_with_index.map do |node, index|\n file_set = create_file_set(node, files[index])\n file_set\n end\n end", "def check_list(inlist, outlist)\n p inlist, outlist\n outlist += inlist.split(\":\")\n p outlist\n outlist.map! { |file| File.expand_path(file) }\n outlist.each do |file|\n unless File.exists?(file)\n raise \"Can't find file \\\"#{file}\\\"\"\n end\n end\nend", "def process_globs globs\n result = globs.flat_map do |glob|\n Dir[File.join directory, glob]\n .map{ |f| f.gsub(/\\\\/, '/') }\n .select { |f| File.file?(f) }\n end\n result\n end", "def find(args, mode)\n return target_files_in_dir if args.empty?\n\n files = []\n\n args.uniq.each do |arg|\n files += if File.directory?(arg)\n target_files_in_dir(arg.chomp(File::SEPARATOR))\n else\n process_explicit_path(arg, mode)\n end\n end\n\n files.map { |f| File.expand_path(f) }.uniq\n end", "def existing_files(my_files = true)\n # I can do this in a convoluted set of if checks, of a couple readable selects.\n output = target_files.select { |f| File.exist? 
f }\n output.delete_if { |f| my_files && is_my_file?(f)}\n\n return output\n end", "def prepare_reads(base, map, fqgz0, *fqgzs0)\n\n fqgzs = [fqgz0] + fqgzs0\n\n bcs = Hash.new\n open(map, 'r').each do |line|\n bc, well = line.rstrip.split(',')\n bcs[bc] = well\n end\n \n bcl = bcs.keys.map!{|key| key.length}.sort.uniq[0]\n\n tso_pattern = '.'*options.umi_length + '.'*bcl + 'GG'\n\n #\n \n STDERR.puts \"#{`date`.strip}: Demultiplexing each raw sequence files...\"\n \n fqgz2csv0 = Hash.new\n fqgz2csv1 = Hash.new\n fqgz2base = Hash.new\n fqgzs.each do |fqgz|\n fqgz2csv0[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2csv1[fqgz] = get_temporary_path('strt.preprocess', 'csv', false)\n fqgz2base[fqgz] = get_temporary_path('strt.preprocess', 'base', false)\n end\n\n Parallel.map(fqgz2csv0.keys, in_processes: options.parallel) do |fqgz|\n cmds = [\n \"unpigz -c #{fqgz}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv0[fqgz]}\",\n \"fq1l match_5end#{grep_prefix_option(options)} #{tso_pattern}\",\n \"#{fq1l_count_command(options)} #{fqgz2csv1[fqgz]}\",\n \"fq1l annotate_index --first-cycle=#{options.umi_length+1} --last-cycle=#{options.umi_length+bcl}\",\n \"fq1l annotate_umi --first-cycle=1 --last-cycle=#{options.umi_length}\",\n \"fq1l sort_index#{coreutils_prefix_option}#{parallel_option(options)} --buffer-size=#{(options.maximum_memory/(fqgz2csv0.keys.size+1)).to_i}%\",\n \"fq1l demultiplex #{fqgz2base[fqgz]} #{map}\"\n ]\n cmds.insert(2, \"#{head_command(options)} -n #{options.reads}\") unless options.reads.nil?\n stats = Open3.pipeline(*cmds)\n stats.each_index do |i|\n raise \"Fail at process #{i}; #{stats[i]}; #{cmds[i]}\" unless stats[i].success? || (stats[i].signaled? && stats[i].termsig == 13)\n end\n end\n\n system \"fq1l sum_counts #{fqgz2csv0.values.join(' ')} > #{base}.count.step1.csv\"\n unlink_files(fqgz2csv0.values)\n \n system \"fq1l sum_counts #{fqgz2csv1.values.join(' ')} > #{base}.count.step2.csv\"\n unlink_files(fqgz2csv1.values)\n\n #\n \n (bcs.values + ['NA']).each do |well|\n\n STDERR.puts \"#{`date`.strip}: Finishing well #{well}...\"\n \n tmpfqgzs = fqgz2base.values.map {|base| \"#{base}.#{well}.fq.gz\"}\n csvs = Array.new(6) {|i| \"#{base}.#{well}.count.step#{i+3}.csv\"}\n \n pipeline(\"unpigz -c #{tmpfqgzs.join(' ')}\",\n \"#{fq1l_convert_command(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[0]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_duplicate\",\n \"#{fq1l_count_command(options)} #{csvs[1]}\",\n \"fq1l trim_3end_quality\",\n \"#{fq1l_count_command(options)} #{csvs[2]}\",\n \"fq1l trim_3end_primer#{coreutils_prefix_option}#{grep_prefix_option(options)}#{parallel_option(options)}\",\n \"#{fq1l_count_command(options)} #{csvs[3]}\",\n \"#{fq1l_sort_command} --buffer-size=#{(options.maximum_memory/2).to_i}%\",\n \"fq1l exclude_degenerate\",\n \"#{fq1l_count_command(options)} #{csvs[4]}\",\n \"fq1l trim_5end --minimum-length=#{options.minimum_length} #{tso_pattern}+\",\n \"#{fq1l_count_command(options)} #{csvs[5]}\",\n \"fq1l restore#{coreutils_prefix_option}\",\n \"pigz -c > #{base}.#{well}.fq.gz\")\n \n unlink_files(tmpfqgzs)\n \n end\n \n end", "def get_sample_names(substring)\n inputs = Dir.glob(\"inputs/#{$jobid}/*.txt\").select {|x| x.include? substring}\n inputs = inputs.map{|x| File.basename(x).sub(\".txt\", \"\")}\n return inputs\nend", "def get_sample_names(substring)\n inputs = Dir.glob(\"inputs/#{$jobid}/*.txt\").select {|x| x.include? 
substring}\n inputs = inputs.map{|x| File.basename(x).sub(\".txt\", \"\")}\n return inputs\nend", "def get_sample_names(substring)\n inputs = Dir.glob(\"inputs/#{$jobid}/*.txt\").select {|x| x.include? substring}\n inputs = inputs.map{|x| File.basename(x).sub(\".txt\", \"\")}\n return inputs\nend", "def get_sample_names(substring)\n inputs = Dir.glob(\"inputs/#{$jobid}/*.txt\").select {|x| x.include? substring}\n inputs = inputs.map{|x| File.basename(x).sub(\".txt\", \"\")}\n return inputs\nend", "def get_sample_names(substring)\n inputs = Dir.glob(\"inputs/#{$jobid}/*.txt\").select {|x| x.include? substring}\n inputs = inputs.map{|x| File.basename(x).sub(\".txt\", \"\")}\n return inputs\nend", "def process_other_source_files\n files = @options[:include_source_files].flatten\n files.each do |f|\n FileUtils.cp Dir[f], @working_dir\n end\n end", "def files\n templates.map(&:filename)\n end", "def build_output_file_name(from_file, to_file)\n return if to_file.nil?\n if FileExtensions::EXT.include?(to_file.to_s)\n yield from_file.gsub(/#{File.extname(from_file)}$/, \".#{to_file}\")\n else\n yield \"#{to_file}\"\n end\n end" ]
[ "0.7032774", "0.70138144", "0.6730175", "0.66165453", "0.65996295", "0.65400815", "0.65400815", "0.63889563", "0.6332165", "0.61729926", "0.61503655", "0.6077135", "0.6071689", "0.6044346", "0.5937593", "0.5893649", "0.5862156", "0.5841952", "0.57922214", "0.5784225", "0.57794064", "0.5776776", "0.5704839", "0.5697738", "0.5694491", "0.5693932", "0.5691698", "0.5669376", "0.56662256", "0.5644822", "0.563574", "0.5623234", "0.5597288", "0.55813104", "0.5580844", "0.556719", "0.556716", "0.55637145", "0.55518204", "0.5544347", "0.55412906", "0.5501866", "0.5496509", "0.54960454", "0.548571", "0.54850554", "0.5469899", "0.5456035", "0.545301", "0.5443654", "0.5439934", "0.54367524", "0.5432803", "0.54256237", "0.5421813", "0.5420964", "0.54199916", "0.5419802", "0.54191035", "0.5415675", "0.54128", "0.54110724", "0.54099596", "0.5397974", "0.5397974", "0.53938186", "0.5389376", "0.5384081", "0.53725696", "0.5372196", "0.53677773", "0.5367168", "0.5355031", "0.5355031", "0.5352447", "0.5350669", "0.53439283", "0.5343641", "0.5340781", "0.5340781", "0.5333454", "0.5331426", "0.53254324", "0.531596", "0.53032726", "0.52880764", "0.5286384", "0.5285995", "0.5276631", "0.527649", "0.5264545", "0.5254647", "0.52528286", "0.52506435", "0.52506435", "0.52506435", "0.52506435", "0.52425927", "0.5240653", "0.52364993" ]
0.6940107
2
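Several of the negatives in the preceding row reduce to the same glob-and-map idiom for collecting sample names from an inputs directory. A minimal runnable sketch of that idiom, assuming a hypothetical inputs/<jobid>/ layout with plain-text sample files (the job_dir name and directory layout here are illustrative, not taken from the row):

    # Collect sample basenames whose filenames contain `substring`.
    # `job_dir` is a hypothetical directory such as "inputs/42".
    def sample_names(job_dir, substring)
      Dir.glob(File.join(job_dir, "*.txt"))
         .select { |path| File.basename(path).include?(substring) }
         .map { |path| File.basename(path, ".txt") }
    end

    # sample_names("inputs/42", "tumor") #=> e.g. ["tumor_A", "tumor_B"]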
Return output files based on the provided output files
def output_files @output_files ||= to_file(@args.output) end
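The positive document above is a one-line memoized accessor: the first call converts @args.output, and ||= caches the result for every later call. A minimal sketch of the pattern, with to_file and @args stubbed out as assumptions because the row shows only the accessor itself:

    # Minimal sketch, assuming `to_file` wraps each raw path and
    # `@args.output` holds the output arguments; both are hypothetical.
    class Task
      Args = Struct.new(:output)

      def initialize(output_paths)
        @args = Args.new(output_paths)
      end

      def output_files
        @output_files ||= to_file(@args.output)  # computed once, then cached
      end

      private

      # Hypothetical helper: the real implementation is not shown in the row.
      def to_file(paths)
        Array(paths).map { |p| File.expand_path(p) }
      end
    end

    task = Task.new(["out/a.txt", "out/b.txt"])
    task.output_files.equal?(task.output_files)  #=> true (same cached array)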
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_files\n @output_files ||= Fileset.new()\n end", "def output_files\n get_info :output_files\n end", "def output_files\n @output_files ||= Fileset.new\n end", "def output_files\n @output_files ||= Fileset.new\n end", "def _output_paths(file)\n input_file_dir = File.dirname(file)\n file_name = _output_filename(file)\n file_name = \"#{file_name}.erb\" if _append_html_ext_to_output_path?(file_name)\n input_file_dir = input_file_dir.gsub(Regexp.new(\"#{options[:input]}(\\/){0,1}\"), '') if options[:input]\n\n if options[:output]\n Array(options[:output]).map do |output_dir|\n File.join(output_dir, input_file_dir, file_name)\n end\n else\n if input_file_dir == ''\n [file_name]\n else\n [File.join(input_file_dir, file_name)]\n end\n end\n end", "def create_output_files\n return unless @option_output_path\n return if @collected_nodes.empty?\n @collected_nodes.each do |certname, properties|\n next if properties['settings'].empty?\n output_file = \"#{@option_output_path}/nodes/#{certname}.yaml\"\n File.write(output_file, properties['settings'].to_yaml)\n output(\"## Wrote Hiera YAML file: #{output_file}\\n\\n\")\n end\n return if @common_settings.empty?\n output_file = \"#{@option_output_path}/common.yaml\"\n File.write(output_file, @common_settings.to_yaml)\n end", "def files_from_generator_output(output, type = 'create')\n output.to_a.map { |line| line.scan(/#{type}\\s+([^\\s]+)$/).flatten.first }.compact.select { |f| File.exist?(f) and !File.directory?(f) }\nend", "def check_output_files\n return if @output_files.nil?\n\n flag = true\n @output_files.uniq.each do |file_path|\n unless File.exist?(file_path)\n warn \"Output file not found: #{file_path}\"\n flag = false\n end\n end\n puts 'All output file exist.' if flag\n end", "def all_filenames\n\n\n # This checks for it being an array and not nil!\n # return @filenames if @filenames && [email protected]?\n\n # This means we can add files to the output\n return $filenames if $filenames && $filenames.size > 5 # I guess that small numbers are errors too\n \n if @directory\n @output_directory ||= File.join(@directory, 'Build')\n $filenames = Dir.glob(File.join(@directory, \"**/*\")).map {|file|\n next if file.start_with?(@output_directory)\n next if File.directory?(file)\n file.gsub(@directory+\"/\", \"\")\n }.compact\n else\n []\n end\n end", "def sort_out_output_directories \n FileUtils.mkdir_p(output_directory)\n FileUtils.mkdir_p(xml_directory)\n FileUtils.mkdir_p(intermediate_directory) unless run_in_memory\n end", "def generate_downstream_files(search_engines, list_of_files, overwrite=false)\n wanted = search_engines.map do |se|\n se.search_input_extension\n end\n wanted.each do |ext|\n #####\n end\n end", "def target_files\n files.map {|f| target_file f}\n end", "def setup_outputs_for(input_file_path)\n file_name_without_extension = File.basename(input_file_path, '.*')\n outputs = (exporters || Tracksperanto.exporters).map do | exporter_class |\n export_name = [file_name_without_extension, exporter_class.desc_and_extension].join(\"_\")\n export_path = File.join(File.dirname(input_file_path), export_name)\n exporter_class.new(open_owned_export_file(export_path))\n end\n \n Tracksperanto::Export::Mux.new(outputs)\n end", "def category_result_files\n crs = self.data_directory.find_all { |name| name =~ /^multi_.*\\.htm\\.yml$/ }\n crs.collect { |fn| File.join(self.data_directory.path, fn) }\n end", "def create_files\n tests.each do |test|\n FileUtils.mkdir(test.id.to_s) unless Dir.exist?(test.id.to_s) if test.option[:dir]\n files = 
[]\n files << test.action.split('?').first\n files += test.option[:implicit]\n files << test.result_rdf if test.result_rdf\n files << test.result_json if test.result_json\n files.compact.select {|f| !File.exist?(f)}.each do |f|\n File.open(f, \"w\") {|io| io.puts( f.end_with?('.json') ? \"{}\" : \"\")}\n end\n end\n end", "def list_scan_files(options = {})\n subs = all_subjects\n filt_subs = looked_for(options[:subjects])\n filt_sess = looked_for(options[:sessions])\n filt_types = looked_for(options[:scan_type] || \"fMRI\") \n scans_list = all_scans(subs, filt_subs, filt_sess, filt_types) \n results = []\n scans_list.each do |scan_dir|\n resources_path = \"#{scan_dir}/Resources\"\n scanlist_type = list_files(\"#{resources_path}\", :directory).map { |e| Pathname.new(e.name).basename.to_s }\n scanlist_type.each do |scan_type|\n scanlist = list_files(\"#{resources_path}/#{scan_type}/Files\", :regular).map { |e| Pathname.new(e.name).basename.to_s }\n if (! options[:ext].blank?) && options[:ext].is_a?(Regexp)\n scanlist = scanlist.select { |scan| scan.match(options[:ext]) }\n end\n scanlist.each { |scan| results << \"#{resources_path}/#{scan_type}/Files/#{scan}\" }\n end \n end\n results\n end", "def output_file\n [\n output_root,\n feed_name,\n batch_id + \".\" + output_extension\n ].join(\"/\")\n end", "def cleanupOutput()\n\tlines_array = IO.readlines(\"outputList.txt\")\n\n\toutputFile = File.open(\"outputListFinal.txt\", 'w')\n\tlines_array.each do |line|\n\t\tif line =~ /file '(.*)'/\n\t\t\tfilename = $1.to_s\n\t\t\tif(File.exist?(filename))\n\t\t\t\toutputFile.write(\"file '\"+filename+\"'\\n\")\n\t\t\tend\n\t\tend\n\tend\n\n\toutputFile.close\nend", "def odir(file_name)\n @output_files ||= []\n file_path = File.join(@out_dir, file_name)\n @output_files << file_path\n file_path\n end", "def make_files(targets)\n file_pairs = targets.map { |t| \n filename = sanitize_filename(t[:data][:name] + '.json')\n [filename, t]\n }\n unique_pairs = uniqufy(file_pairs)\n unique_pairs.each do |name, content| \n puts \"Write #{File.absolute_path(name)}\"\n File.open(name, 'w') { |file| file.write(JSON.pretty_generate(content)) }\n end\nend", "def create_files(output, dir)\n FileUtils.mkdir_p(dir) # create directory if doesn't exist\n\n words = File.new(\"#{dir}/words.txt\", 'w')\n sequences = File.new(\"#{dir}/sequences.txt\", 'w')\n\n output.each do |key, value|\n sequences.puts(key.to_s)\n words.puts(value.to_s)\n end\n\n sequences.close\n words.close\n end", "def run\n executor.run\n @files.map { |file| File.join(@output_dir, file.relative_file_name) }\n end", "def files\n @files ||= lambda {\n sorted_relevant_files = []\n\n file_globs.each do |glob|\n current_glob_files = Pathname.glob(glob)\n relevant_glob_files = relevant_files & current_glob_files\n\n relevant_glob_files.map! do |file|\n File.new(path: file,\n namespaces: namespaces,\n decryption_keys: decryption_keys,\n encryption_keys: encryption_keys,\n signature_name: signature_name)\n end\n\n sorted_relevant_files += relevant_glob_files\n end\n\n sorted_relevant_files.uniq\n }.call\n end", "def files\n ext_files = mapper.extracted_files || []\n ext_files + [mapper.zip.name.to_s]\n end", "def compile_file_list(output_folder, files, data = {})\n files.each do |target, source|\n Google::LOGGER.debug \"Compiling #{source} => #{target}\"\n target_file = File.join(output_folder, target)\n .gsub('{{product_name}}', @api.prefix[1..-1])\n\n manifest = @config.respond_to?(:manifest) ? 
@config.manifest : {}\n generate_file(\n data.clone.merge(\n name: target,\n product: @api,\n object: {},\n config: {},\n scopes: @api.scopes,\n manifest: manifest,\n tests: '',\n template: source,\n generated_files: @generated,\n sourced_files: @sourced,\n compiler: compiler,\n output_folder: output_folder,\n out_file: target_file,\n prop_ns_dir: @api.prefix[1..-1].downcase,\n product_ns: @api.prefix[1..-1].camelize(:upper)\n )\n )\n\n %x(goimports -w #{target_file}) if File.extname(target_file) == '.go'\n end\n end", "def fetch_archives(output = render_template)\n return [] if output.blank?\n filename, path = create_tempfile(output)\n archive = archive_files(File.dirname(path), filename, [expanded_path(path)])\n remove_tempfile(path)\n split_archive(archive, \"part_#{filename}\", 1024 * 1024 * 100) # 104_857_600\n end", "def create_files\n tests.each do |test|\n files = [test.action, test.urgna2012, test.urdna2015].compact\n files.compact.select {|f| !File.exist?(f)}.each do |f|\n File.open(f, \"w\") {|io| io.puts( f.end_with?('.json') ? \"{}\" : \"\")}\n end\n end\n end", "def resource_output_paths(resource_input_paths)\n resource_input_paths.map do |resource_input_path|\n base_path = '${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}'\n extname = File.extname(resource_input_path)\n basename = extname == '.xcassets' ? 'Assets' : File.basename(resource_input_path)\n output_extension = Target.output_extension_for_resource(extname)\n File.join(base_path, File.basename(basename, extname) + output_extension)\n end.uniq\n end", "def gen_sub_directories\n @outputdir.mkpath\n end", "def cat_files file_groups\n file_groups.each do |group|\n check_exists(group[:paths])\n # this is the Illumina recommended approach to combining these fastq files.\n # See the Casava 1.8 Users Guide for proof\n files_list = group[:paths].join(\" \")\n command = \"cat #{files_list} > #{group[:path]}\"\n execute command\n end\n end", "def generate_index_files\n @folders.each do |folder, files|\n puts \" + Creating #{@dest}/#{folder}/index.html\" if @verbose\n File.open(\"#{@dest}/#{folder}/index.html\", \"w\") do |index|\n title = \"Rails Plug-in for #@name #@version\"\n index.write(\"<html><head><title>#{title}</title></head>\\n\")\n index.write(\"<body>\\n\")\n index.write(\"<h2>#{title}</h2>\\n\")\n extra_links = create_extra_links()\n index.write(\"<p>#{extra_links}</p>\\n\") if extra_links\n files.each { |fn|\n puts(\" - Adding #{fn}\") if @verbose\n index.write(\"&nbsp;&nbsp;<a href=\\\"#{fn}\\\">#{fn}</a><br/>\\n\")\n }\n index.write(\"<hr size=\\\"1\\\"/><p style=\\\"font-size: x-small\\\">Generated with RailsPluginPackageTask<p>\")\n index.write(\"</body>\\n\")\n index.write(\"</html>\\n\")\n end\n end\n end", "def effective_output_path(outputdir)\n outputdir ? 
File.join(outputdir, output_name) : output_path\n end", "def generate_index_files\n @folders.each do |folder, files|\n puts \" + Creating #{@dest}/#{folder}/index.html\" if @verbose\n File.open(\"#{@dest}/#{folder}/index.html\", \"w\") do |index|\n title = \"Rails Plug-in for #@name #@version\"\n index.write(\"<html><head><title>#{title}</title></head>\\n\")\n index.write(\"<body>\\n\")\n index.write(\"<h2>#{title}</h2>\\n\")\n extra_links = create_extra_links()\n index.write(\"<p>#{extra_links}</p>\\n\") if extra_links \n files.each { |fn|\n puts(\" - Adding #{fn}\") if @verbose\n index.write(\"&nbsp;&nbsp;<a href=\\\"#{fn}\\\">#{fn}</a><br/>\\n\")\n }\n index.write(\"<hr size=\\\"1\\\"/><p style=\\\"font-size: x-small\\\">Generated with RailsPluginPackageTask<p>\")\n index.write(\"</body>\\n\")\n index.write(\"</html>\\n\")\n end\n end\n end", "def output(output_path)\n output_check(output_path)\n \n files = Find.find(staging_path).to_a\n safesystem(\"zip\", output_path, *files)\n end", "def getOutputFiles\n if (self.getCompleted)\n file_list = JSON.parse(@client[\"/StatsDownloadUrls?loadTestId=#{@test_id}\"].get)['outputFiles']\n if (file_list && file_list.length == @server_cnt)\n file_list\n end\n end\n end", "def files_for_rotation\n files = Set.new\n Padrino.dependency_paths.each do |path|\n files += Dir.glob(path)\n end\n reloadable_apps.each do |app|\n files << app.app_file\n files += Dir.glob(app.app_obj.prerequisites)\n files += app.app_obj.dependencies\n end\n files + special_files\n end", "def getResultFileNames()\n resultDir = File.dirname(File.dirname(File.expand_path(Dir.pwd)))\n @demuxBustardSummaryXML = resultDir + \"/DemultiplexedBustardSummary.xml\"\n\n baseCallsStatsDir = Dir[resultDir + \"/Basecall_Stats_*\"]\n @demuxStatsHTM = baseCallsStatsDir[0] + \"/Demultiplex_Stats.htm\"\n\n if !File::exist?(@demuxBustardSummaryXML)\n raise \"File \" + @demuxBustardSummaryXML + \" does not exist or is unreadable\"\n end\n\n if !File::exist?(@demuxStatsHTM)\n raise \"File \" + @demuxStatsHTM + \" does not exist or is unreadable\"\n end\n \n end", "def create_own_results_file(filename,output)\n # Create a blank file and put the output in\n self.create_file(\"#{filename}\", output)\n end", "def output_files_exist?(dir)\n File.file?(\"#{dir}/sequences.txt\") || File.file?(\"#{dir}/words.txt\") ? true : false\n end", "def get_content_output_filename output_dir, filename\n\n file_basename = File.basename(filename, '.*')\n\n if file_basename != 'index'\n output_dir = File.join(output_dir, file_basename)\n Dir.mkdir( output_dir )\n end\n\n File.join(output_dir, 'index.html')\n end", "def takeFilesNames\nDir['result*.*'].each do |file_name|\n @files_names.push(file_name)\nend\nend", "def open_fastq_sub_output_files\n return if @stats_only\n if @pass_sub_filename.nil? 
&& @reject_sub_filename.nil?\n # no files open\n @pass_sub = nil\n @reject_sub = nil\n else\n # split the reads that passed and the reads that failed\n # the quality filter into separate files; if either one\n # of the file names was not specified then discard the\n # corresponding reads\n unless @pass_sub_filename.nil?\n STDERR.puts \"Opening #{make_temp_filename(@pass_sub_filename)} for subfile pass filter FASTQ output.\" if @verbose\n @pass_sub = open_fastq_output(make_temp_filename(@pass_sub_filename))\n end\n unless @reject_sub_filename.nil?\n STDERR.puts \"Opening #{make_temp_filename(@reject_sub_filename)} for subfile reject filter FASTQ output.\" if @verbose\n @reject_sub = open_fastq_output(make_temp_filename(@reject_sub_filename))\n end\n end\n end", "def copyOutputFiles(fromDir, filePattern, outDir)\n\tFileUtils.mkdir_p outDir unless exists?(outDir)\n\tDir.glob(File.join(fromDir, filePattern)){|file|\n\t\tcopy(file, outDir) if File.file?(file)\n\t}\nend", "def output_file(file = nil)\n if file\n output_filename = file.name\n if !@output_filename.nil? && @files.length == 1\n output_filename = @output_filename\n end\n else\n output_filename = Time.now.to_s.split(' ').join('_')\n output_filename = @files.last.name if @files.length == 1 || @merge\n output_filename = @output_filename unless @output_filename.nil?\n end\n\n ::File.join(output_dir, \"#{output_filename}.pdf\")\n end", "def list_anat_files(options = {}) \n subs = all_subjects\n filt_subs = looked_for(options[:subjects])\n filt_sess = looked_for(options[:sessions])\n filt_types = looked_for(options[:t_types] || [ \"T1\", \"T2\" ]) \n scans_list = all_scans(subs, filt_subs, filt_sess, filt_types) \n results = []\n scans_list.each do |scan_dir|\n resources_path = \"#{scan_dir}/Resources\"\n scanlist_type = list_files(\"#{resources_path}\", :directory).map { |e| Pathname.new(e.name).basename.to_s }\n scanlist_type.each do |scan_type|\n scanlist = list_files(\"#{resources_path}/#{scan_type}/Files\", :regular).map { |e| Pathname.new(e.name).basename.to_s }\n if (! options[:ext].blank?) && options[:ext].is_a?(Regexp)\n scanlist = scanlist.select { |scan| scan.match(options[:ext]) }\n end\n scanlist.each { |scan| results << \"#{resources_path}/#{scan_type}/Files/#{scan}\" }\n end \n end\n results\n end", "def generate_file_files( options, files, classes )\n\t\tdebug_msg \"Generating file documentation in #@outputdir\"\n\t\ttemplatefile = @template_dir + 'filepage.rhtml'\n\n\t\tfiles.sort_by {|k,v| k }.each do |path, fileinfo|\n\t\t\toutfile = @outputdir + fileinfo[:outfile]\n\t\t\tdebug_msg \" working on %s (%s)\" % [ path, outfile ]\n\t\t\trel_prefix = @outputdir.relative_path_from( outfile.dirname )\n\t\t\tcontext = binding()\n\n\t\t\tdebug_msg \" rending #{outfile}\"\n\t\t\tself.render_template( templatefile, binding(), outfile )\n\t\tend\n\tend", "def get_output(file)\n file_dir = File.dirname(file)\n file_name = File.basename(file).split('.')[0..-2].join('.')\n unless file_name =~ /\\.js$/\n file_name << \".js\"\n end\n \n file_dir = file_dir.gsub(Regexp.new(\"#{@options[:input]}(\\/){0,1}\"), '') if @options[:input]\n file_dir = File.join(@options[:output], file_dir) if @options[:output]\n\n if file_dir == ''\n file_name\n else\n File.join(file_dir, file_name)\n end\n end", "def bundle_files(files)\n output = \"\"\n files.select { |f| !f.content.empty? 
}.each do |file|\n content = file.content\n path = file.path\n output << bundled_file_header(path) \n output << include_imported_files(content, path) if file.type[:ext] == 'css'\n content << javascript_fix if file.type[:ext] == '.js' \n output << content\n output << bundled_file_footer(path)\n end\n output\n end", "def pilot_flight_result_files\n pfs = self.data_directory.find_all { |name| name =~ /^pilot_p\\d{3}s\\d\\d\\.htm\\.yml$/ }\n pfs.collect { |fn| File.join(self.data_directory.path, fn) }\n end", "def output_map\n @output_map ||= nil\n return @output_map if @output_map\n path_watch = {}\n @output_entry_map = {}\n Find::find( skel_path ) do |path|\n path.untaint\n if File.basename(path)[0] == ?.\n Find.prune \n elsif not FileTest.directory? path\n tpl_path = path.gsub( /^#{ Regexp::quote( skel_path ) }\\/?/, '' )\n output = outputs.detect { |p| if tpl_path =~ /\\.#{ p.extension }$/; tpl_path = $`; end }\n if output\n ## Figure out template extension and output filename\n page_name, tpl_ext = tpl_path.dup, ''\n while page_name =~ /\\.\\w+$/; page_name = $`; tpl_ext = $& + tpl_ext; end\n next if tpl_ext.empty?\n ## Build the output pages\n build_pages( page_name ) do |vars|\n ## Extension and Path\n vars[:page].add_ext( tpl_ext )\n vars[:template] = path\n vars[:output] = output\n eid = ( vars[:entry] && vars[:entry].id ) || page_name\n if not @output_entry_map[ eid ]\n @output_entry_map[ eid ] = vars\n elsif tpl_ext.split( '.' )[1] == central_ext\n @output_entry_map[ eid ] = vars\n end\n\n ## If output by a deeper page, skip\n pub_name, = path_watch[vars[:page].link]\n next if pub_name and !( vars[:page].link.index( page_name ) == 0 and\n page_name.length > pub_name.length )\n\n path_watch[vars[:page].link] = [page_name, vars]\n end\n end\n end\n end\n @output_map = {}\n path_watch.each_value do |page_name, vars|\n @output_map[page_name] ||= []\n @output_map[page_name] << vars\n end\n @output_map\n end", "def foreach\n find_files.each do |file|\n yield(@build_result_dir, file)\n end\n end", "def find_files\n find_files_recursive(@build_result_dir, '')\n end", "def write_meta_files\r\n @files.each do |file|\r\n template = ERB.new(\r\n File.new(File.join(@dirs[:templates],file[:template])).read,\r\n nil, \"%\")\r\n output_file = File.new(file[:path], \"w+\")\r\n output_file.write(template.result(binding))\r\n output_file.close\r\n end\r\n end", "def outputFN( input )\n # First: if we are in a temp folder put it where the script is... \n # otherwise we drop it in the temp folder. 
This only happens with OCRA.\n tmp = /\\W(temp|tmp)\\W/i\n inreg = /\\Win(put)?$/i\n\n if File.dirname( input ) =~ inreg\n File.expand_path( File.join( File.dirname( input ), \"..\", \"out\" , File.basename( input, \".py\" ) ) )\n elsif tmp =~ File.dirname( __FILE__ )\n if tmp =~ File.dirname( input )\n \"\" # they can choose a directory manually\n else\n File.join File.dirname( input ), File.basename( input, \".py\" )\n end\n else\n File.join File.dirname( __FILE__ ), \"out\", File.basename( input, \".py\" ) \n end\nend", "def gen_sub_directories\n\t\t@outputdir.mkpath\n\tend", "def gen_sub_directories\n\t\t@outputdir.mkpath\n\tend", "def process_outputs(args)\n @input = @generator = @window = @fill_value = nil # Cover our tracks.\n\n if (name = args[:out_file])\n IO.write(name, @output, mode: \"wb\")\n elsif (out_str = args[:out_str])\n out_str << @output\n end\n\n (_, @output = @output, nil)[0] # Return output and erase it.\n end", "def create_output_directories\n return unless @option_output_path\n subdirectory = \"#{@option_output_path}/nodes\"\n return @option_output_path if File.directory?(@option_output_path) && File.directory?(subdirectory)\n Dir.mkdir(@option_output_path)\n output_path_error_and_exit(@option_output_path) unless File.directory?(@option_output_path)\n Dir.mkdir(subdirectory)\n output_path_error_and_exit(subdirectory) unless File.directory?(subdirectory)\n @option_output_path\n end", "def populate_output_path(options = {})\n base = Pathname.new(@source_path).basename.to_s\n if options.empty?\n result = base\n else\n name, ext = *base.split(\".\")\n if options[:output_path].nil? || File.directory?(options[:output_path])\n tokens = \"\"\n MODULES.each do |mod|\n token = mod.filename_token(options)\n tokens += \"-#{token}\" unless token.nil?\n end\n result = options[:output_path].nil? ? \"\" : \"#{options[:output_path].to_s}/\"\n result += \"#{name}#{tokens}.#{ext}\"\n elsif !options[:output_path].nil?\n result = \"#{options[:output_path].to_s}.#{ext}\"\n end\n end\n @path = Pathname.new(result)\n end", "def output_file\n raise \"Invalid empty feed_name()\" if feed_name.nil? || feed_name.empty?\n path = [\n output_root,\n feed_name,\n batch_id || \"output\"\n ].compact.join(\"/\") + \".\" + output_extension\n end", "def generate_output(inputs, output)\n inputs.each do |input|\n begin\n raise error (\"I need a file to compile\") if not input.respond_to?(:read)\n\n #puts \"tsc: #{input.path} \" << options.join(\" \")\n\n #Using compile_file because it gives us better error messages\n result = TypeScript::Node::compile_file(input.fullpath, options)\n if result.success?\n output.write(result.js)\n else\n raise result.stderr\n end\n rescue ExecJS::Error => error\n raise error, \"Error compiling #{input.path}. #{error.message}\"\n rescue RuntimeError => e\n raise e, \"Error compiling #{input.path}. 
#{e.message}\"\n end\n end\n end", "def execute\n settings = read_settings\n input, remove_regexp, output = get_in_out_removes(settings)\n create_output_dir(output)\n Dir.glob(input).each do |file|\n input_src = get_input(file)\n replaced = remove_head(input_src.encode('UTF-16BE', 'UTF-8', invalid: :replace, undef: :replace, replace: '?').encode('UTF-8'), remove_regexp)\n output_file(file, replaced, output)\n end\n end", "def generate_file_files( options, files, classes )\n\t\tdebug_msg \"Generating file documentation in #@outputdir\"\n\t\ttemplatefile = @template_dir + 'filepage.rhtml'\n\n\t\tmodsort = self.get_sorted_module_list( classes )\n\n\t\tfiles.sort_by {|k,v| k }.each do |path, fileinfo|\n\t\t\toutfile = @outputdir + fileinfo[:outfile]\n\t\t\tdebug_msg \" working on %s (%s)\" % [ path, outfile ]\n\t\t\trel_prefix = @outputdir.relative_path_from( outfile.dirname )\n\t\t\tcontext = binding()\n\n\t\t\tdebug_msg \" rendering #{outfile}\"\n\t\t\tself.render_template( templatefile, binding(), outfile )\n\t\tend\n\tend", "def group_fastq_files starting_path, output_path, options = {:prefix => \"L\", :suffix => \".fastq.gz\", :exclude_undetermined => true}\n execute \"mkdir -p #{output_path}\"\n fastq_groups = []\n \n fastq_files = Dir.glob(File.join(starting_path, fastq_search_path))\n if fastq_files.empty?\n log \"# ERROR: no fastq files found in #{starting_path}\" if fastq_files.empty?\n else\n log \"# #{fastq_files.size} fastq files found in #{starting_path}\"\n fastq_file_data = get_file_data fastq_files, \"\\.fastq\\.gz\"\n fastq_groups = group_files fastq_file_data, output_path, options\n end\n fastq_groups\n end", "def files\n @exported_pr_dir ? Dir.glob(@exported_pr_dir) : []\n end", "def compile_files(files)\n files.each do |base_path|\n # We do this second glob in case the path provided in the tayfile\n # references a compiled version\n Dir[@base_dir.join('src', base_path + '*')].each do |path|\n path = Pathname.new(path).relative_path_from(@base_dir.join('src'))\n file_in_path = @base_dir.join('src', path)\n file_out_path = asset_output_filename(@output_dir.join(path), @sprockets.engines.keys)\n\n if @sprockets.extensions.include?(path.extname)\n content = @sprockets[file_in_path].to_s\n else\n content = File.read(file_in_path)\n end\n\n FileUtils.mkdir_p(file_out_path.dirname)\n File.open(file_out_path, 'w') do |f|\n f.write content\n end\n end\n end\n end", "def copy_files(output_folder)\n copy_file_list(output_folder, @config.files.copy)\n end", "def write_output\n # If an output directory is given, open a file & write to it\n if output = self.output\n @path_decls.each do |path, decls|\n name = snake_case(decls.name.name.to_s.dup)\n file_path = File.join(output, \"#{name}.rbs\")\n File.open(file_path, 'w') do |io|\n stdout.puts \"Writing output to file: #{file_path}\"\n RBS::Writer.new(out: io).write([decls])\n end\n end\n # If no output directory is given write to STDOUT\n else\n RBS::Writer.new(out: stdout).write(@path_decls.values)\n end\n end", "def getFiles(theArgs)\n\n\ttheFiles = [];\n\tpathsExclude = theArgs[:exclude];\n\n\ttheArgs[:paths].each do |pathRoot|\n\t\n\t\tif (File.exist?(pathRoot))\n\t\t\tFind.find(pathRoot) do |thePath|\n\t\t\t\tif (File.file?(thePath))\n\n\t\t\t\t\tif (!pathsExclude.include?(thePath))\n\t\t\t\t\t\tif (!FILES_EXCLUDE.include?(File.basename(thePath)))\n\t\t\t\t\t\t\ttheFiles << thePath;\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\n\t\t\t\tend\n\t\t\tend\n\t\telse\n\t\t\tputs \"Skipping #{pathRoot}, file not found\";\n\t\tend\n\tend\n\n\treturn 
theFiles;\n\nend", "def files_filtering files\n return files unless @file_regexp\n f = files.select do |file|\n test_name_by_date file\n end\n f\n end", "def test_extract(files=nil)\n output = 'test/embedded' # Don't think output should be setable.\n\n files = files || 'lib/**/*.rb'\n files = 'lib/**/*.rb' if TrueClass == files\n files = [files].flatten.compact\n\n filelist = files.collect{ |f| Dir.glob(f) }\n filelist.flatten!\n if filelist.empty?\n puts \"No scripts found from which to extract tests.\"\n return\n end\n\n FileUtils.mkdir_p(output) unless File.directory?(output)\n\n #vrunner = VerbosityRunner.new(\"Extracting\", verbosity?)\n #vrunner.setup(filelist.size)\n\n filelist.each do |file|\n #vrunner.prepare(file)\n\n testing = extract_test_from_file( file )\n if testing.strip.empty?\n status = \"[NONE]\"\n else\n complete_test = create_test(testing, file)\n libpath = File.dirname(file)\n testfile = \"test_\" + File.basename(file)\n fp = File.join(output, libpath, testfile)\n unless File.directory?( File.dirname(fp))\n FileUtils.mkdir_p(File.dirname(fp))\n end\n File.open(fp, \"w\"){ |fw| fw << complete_test }\n status = \"[TEST]\"\n end\n\n #vrunner.complete(file, status)\n end\n\n #vrunner.finish(\n # :normal => \"#{filelist.size} files had tests extracted.\",\n # :check => false\n #)\n end", "def single_ucf_file_lists\n File.open(single_bad_ucf_file, 'a') do |mergedfile|\n Dir.glob(\"#{output_directory_path}*name.txt\").each do |file|\n File.foreach(file) do |line|\n mergedfile.write(line)\n end\n end\n end\n end", "def generate_output_file(zip_out, contents); end", "def package_files\n (rdoc_files + lib_files + tests + doc_files + \n programs + extra_files + extension_files).uniq\n end", "def files\n result = []\n @my_files.each do |f|\n result << f.fname if FileTest.file?(f.fname)\n end\n result\n end", "def generate_fastq\n\n # Generate FASTQ file list, expanding patterns if found.\n fastq_input_file_list = []\n fastq_output_prefix_list = []\n fastq_output_group_list = []\n ARGV.each do |fastq_input_file|\n if fastq_input_file =~ /[\\+\\?\\*]/\n # File is regexp: use it to do our own \"glob\".\n # If the regexp has at least one group in it, save the group match\n # in a corresponding list to use in making the output files.\n fastq_input_dir = File.dirname(fastq_input_file)\n fastq_input_patt = File.basename(fastq_input_file)\n\n Dir.entries(fastq_input_dir).sort().each do |entry|\n if entry =~ /#{fastq_input_patt}()/o\n fastq_input_file_list << entry\n if not @out_prefix.nil?\n fastq_output_prefix_list << @out_prefix\n else\n fastq_output_prefix_list << entry[0..Regexp.last_match.begin(1)-1-1] # Second -1 is for underline.\n end\n fastq_output_group_list << $1\n end\n end\n else\n if File.file? 
fastq_input_file\n fastq_input_file_list << fastq_input_file\n fastq_output_prefix_list << @out_prefix\n end\n end\n end\n\n die \"no FASTQ files found\" if fastq_input_file_list.length == 0\n\n STDERR.puts(\"Input files: #{fastq_input_file_list}\") if @verbose\n\n fastq_list = fastq_input_file_list.zip(fastq_output_prefix_list, fastq_output_group_list)\n fastq_list.each do |fastq_input_file, fastq_output_prefix, fastq_output_group|\n\n # If we are splitting to subfiles, reset the output sub filenames to\n # the new destination for the new input file; also reset statistics.\n if @save_subfiles\n if fastq_output_group == \"\"\n fastq_output_group_mod = fastq_output_group\n else\n fastq_output_group_mod = \"_#{fastq_output_group}\"\n end\n @pass_sub_filename = File.join(@pass_dir, \"#{fastq_output_prefix}_pf#{fastq_output_group_mod}.fastq\")\n @pass_sub_filename += \".gz\" if @compress\n @reject_sub_filename = File.join(@reject_dir, \"#{fastq_output_prefix}_reject#{fastq_output_group_mod}.fastq\")\n @reject_sub_filename += \".gz\" if @compress\n\n @stats_sub_filename = File.join(@stats_dir, \"#{fastq_output_prefix}_seq_stats#{fastq_output_group_mod}.txt\")\n @pass_sub_read_cnt = @reject_sub_read_cnt = @total_sub_read_cnt = 0\n end\n\n if @save_subfiles\n open_fastq_sub_output_files\n end\n\n # split one FASTQ file into post-filter and reject FASTQ\n STDERR.puts \"Processing #{fastq_input_file}...\" if @verbose\n fastq_input_fp = open_fastq_input(fastq_input_file)\n if fastq_input_fp.nil?\n warn \"#{fastq_input_file} is empty...skipping\"\n next\n end\n begin\n while fastq_input_fp.readline\n header_line = $_\n if header_line !~ /^@/\n STDERR.puts \"Missing header line (#{header_line})...exiting\"\n exit(-1)\n end\n\n header_fields = header_line.split(/[ _]/)\n die \"header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\"!\")}]\" if header_fields.size != 2\n\n sub_header_fields = header_fields[1].split(\":\",-1)\n die \"sub header parse error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER} [#{header_fields.join(\":\")}(#{sub_header_fields.join(\":\")})]\" if sub_header_fields.size != 4\n\n @total_read_cnt += 1\n @total_sub_read_cnt += 1\n\n if sub_header_fields[1] == \"N\"\n out = @pass\n @pass_read_cnt += 1\n out_sub = @pass_sub\n @pass_sub_read_cnt += 1\n elsif sub_header_fields[1] == \"Y\"\n out = @reject\n @reject_read_cnt += 1\n out_sub = @reject_sub\n @reject_sub_read_cnt += 1\n else\n die \"filter field value error at #{fastq_input_file}:#{$INPUT_LINE_NUMBER}...skipping read\"\n out = nil\n end\n\n # Read the rest of the sequence.\n seq_line = fastq_input_fp.readline\n plus_line = fastq_input_fp.readline\n if plus_line !~ /^\\+/\n STDERR.puts \"Malformed FASTQ +line (#{plus_line})\"\n end\n qual_line = fastq_input_fp.readline\n\n # Output the sequence to whatever file was chosen above.\n if !out.nil?\n if not @remove_spaces\n out.print \"#{header_line}\"\n out_sub.print \"#{header_line}\" if not out_sub.nil?\n else\n out.puts header_fields.join(\"_\")\n out_sub.puts header_fields.join(\"_\") if not out_sub.nil?\n end\n out.print \"#{seq_line}\"\n out.print \"#{plus_line}\"\n out.print \"#{qual_line}\"\n if not out_sub.nil?\n out_sub.print \"#{seq_line}\"\n out_sub.print \"#{plus_line}\"\n out_sub.print \"#{qual_line}\"\n end\n end\n end # while\n\n rescue EOFError\n\n end\n\n fastq_input_fp.close()\n\n if @save_subfiles\n close_fastq_sub_output_files\n store_stats @stats_sub_filename, @pass_sub_read_cnt, @reject_sub_read_cnt, @total_sub_read_cnt\n end\n\n 
end # fastq_list.each\n end", "def add_to_all_results_file(filename,output)\n self.add_to_file(filename,output)\n end", "def output_path; end", "def dependent_files\n processed.map(&:abs_path).compact.select { |fn| File.exist?(fn) }\n end", "def init_files(input, output)\n can_read?(input)\n\n @output_file = prepare_write_file(output)\n @error_file = prepare_write_file(ERROR_FILE)\n\n @output_file << LineParser::RESULT_HEADER.to_csv\n @error_file << LineParser::ERROR_HEADER.to_csv\n end", "def build_input_files\n @input_files.map { |file| build_file(file) }\n end", "def concat\n content = \"\"\n files = []\n @opts[:files].each do |file|\n files += if single? file\n [\"#{@opts[:input_dir]}/#{file}.#{@opts[:type]}\"]\n else\n expand file\n end\n end\n files.each do |file|\n content << File.read(file)\n content << \"\\n\"\n end\n\n if @opts[:outputs]\n @opts[:outputs].each do |name|\n output = \"#{name}.#{@opts[:type]}\"\n local = content\n local = @opts[:filter].call(output, local) if @opts[:filter]\n File.open(output, \"w\"){ |f| f.write local.strip }\n UI.info \"Concatenated #{output}\"\n end\n else\n content = @opts[:filter].call(@output, content) if @opts[:filter]\n File.open(@output, \"w\"){ |f| f.write content.strip }\n UI.info \"Concatenated #{@output}\"\n end\n end", "def generate_output\n write_average_fitness('output/average.txt')\n write_best_fitness('output/best.txt')\n write_survivors('output/survivors.txt')\n write_traits('output/traits.txt')\n end", "def build_output_file_name(from_file, to_file)\n return if to_file.nil?\n if FileExtensions::EXT.include?(to_file.to_s)\n yield from_file.gsub(/#{File.extname(from_file)}$/, \".#{to_file}\")\n else\n yield \"#{to_file}\"\n end\n end", "def generate_strfiles\n until @files_written.empty?\n file_path = @files_written.shift\n system(\"strfile\",file_path)\n end\n end", "def existing_files(my_files = true)\n # I can do this in a convoluted set of if checks, of a couple readable selects.\n output = target_files.select { |f| File.exist? f }\n output.delete_if { |f| my_files && is_my_file?(f)}\n\n return output\n end", "def test_files\n files = tests\n files = files.map{ |f| Dir[f] }.flatten\n files = files.map{ |f| File.directory?(f) ? Dir[File.join(f, '**/*.rb')] : f }\n files = files.flatten.uniq\n files = files.map{ |f| File.expand_path(f) }\n files\n end", "def compressFiles\n Dir.chdir(\"#{@outputDir}/RDPsummary\")\n #system(\"tar -zcf #{@sampleSetName1}.tar.gz * --exclude=*.log --exclude=*.sra --exclude=*.sff --exclude=*.local.metadata\")\n system(\"tar czf class.result.tar.gz class\")\n system(\"tar czf domain.result.tar.gz domain\")\n system(\"tar czf family.result.tar.gz family\")\n system(\"tar czf genus.result.tar.gz genus\")\n system(\"tar czf order.result.tar.gz order\")\n system(\"tar czf phyla.result.tar.gz phyla\")\n system(\"tar czf species.result.tar.gz species\")\n system(\"tar czf pdf.result.tar.gz 'find . -name \`*.pdf\`'\")\n Dir.chdir(@scratch)\nend", "def build_output_command(input_files, output_file)\n inputs = input_files.map do |path|\n Sox::File.new(path,\n :type => MEDIATE_TYPE,\n :encoding => MEDIATE_ENCODING,\n :bits => MEDIATE_BITS,\n :channels => @options[:channels],\n :rate => @options[:rate])\n end\n\n output = Sox::File.new(output_file)\n\n builder = CommandBuilder.new(inputs, output, output_options, output_effects)\n builder.build\n end", "def write_filelist(out_file_list)\n result_list.each{|f| log f}\n File.open(out_file_list, \"w\") do |file|\n result_list.each do |item| \n file << item\n file << \"\\n\"\n end\n log \"File list created #{out_file_list}\"\n end \n end", "def write_filelist(out_file_list)\n result_list.each{|f| log f}\n File.open(out_file_list, \"w\") do |file|\n result_list.each do |item| \n file << item\n file << \"\\n\"\n end\n log \"File list created #{out_file_list}\"\n end \n end", "def rec_output(match, out=$stdout)\n out.flush\n #match = (location == dir ? '*' : File.join(dir,'*'))\n files = Dir.glob(match) - exclusions\n files.sort!\n files.each do |file|\n sum = checksum(file,digest)\n sum = sum + ' ' if sum\n out << \"#{sum}#{file}\\n\"\n if File.directory?(file)\n rec_output(File.join(file,'*'), out)\n end\n end\n #return out\n end", "def output_path_without_extension\n File.join output_folder, processed_original_filename_without_extension\n end", "def by_file(first, output)\n qseq = Bio::Ngs::Converter::Qseq.new(options.paired ? :pe : :se)\n buffers = [first] if first.kind_of? String\n buffers = first if first.kind_of? Array\n buffers.each do |file_name|\n qseq.buffer = File.open(file_name,'r') #todo: dir is not used here it could be a bug\n fastq_file = File.open(File.join(options.dir,\"#{output}.fastq\"), (options.append ? 'a' : 'w'))\n qseq.to_fastq do |fastq|\n fastq_file.puts fastq if fastq\n end\n qseq.buffer.close\n fastq_file.close \n #Write the report\n File.open(File.join(options.dir,\"#{output}.stats\"), (options.append ? 'a' : 'w')) do |file|\n file.puts ({:file_name=>file_name, :stats=>qseq.stats}.to_yaml)\n end\n end #buffers\n # puts \"Done #{file_name}\"\n end", "def files\n templates.map(&:filename)\n end", "def compare_common_files(folders)\n @common_conf_files.each do |common_conf_name|\n common_json = fetch_common_json(common_conf_name, folders)\n # Print common file with json here.\n output_common_json(common_json, common_conf_name)\n end\n end", "def get_output_folder\n File.join( Rails.root, 'public', 'output' )\n end", "def generate_files(base_dir,script,mtime=nil)\n\n base_dir ||= Dir.pwd\n\n script.each_pair do |filename,value|\n path = File.join(base_dir,filename)\n if value.instance_of? Hash\n FileUtils.mkdir_p(path)\n if mtime\n File.utime(mtime,mtime,path)\n end\n generate_files(path,value,mtime)\n else\n FileUtils.write_text_file(path,value)\n if mtime\n File.utime(mtime,mtime,path)\n end\n end\n end\n end", "def collect_sources_and_toolchains\n sources_to_build = {}\n\n exclude_files = Set.new\n exclude_sources.each do |p|\n if p.include?(\"..\")\n Printer.printError \"Error: Exclude source file pattern '#{p}' must not include '..'\"\n return nil\n end\n\n Dir.glob(p).each {|f| exclude_files << f}\n end\n files = Set.new # do not build the same file twice\n\n add_to_sources_to_build(sources_to_build, exclude_files, sources)\n\n source_patterns.each do |p|\n if p.include?(\"..\")\n Printer.printError \"Error: Source file pattern '#{p}' must not include '..'\"\n return nil\n end\n\n globRes = Dir.glob(p)\n if (globRes.length == 0)\n Printer.printWarning \"Warning: Source file pattern '#{p}' did not match to any file\"\n end\n add_to_sources_to_build(sources_to_build, exclude_files, globRes, tcs4source(p))\n end\n return sources_to_build\n end", "def get_out_filename(param_index)\n filename_str = get_next_parameter(param_index)\n $options[:out_filename] = Pathname.new( filename_str ) unless filename_str.nil?\nend" ]
[ "0.74933153", "0.7390097", "0.7365953", "0.7365953", "0.6977151", "0.695596", "0.65952736", "0.6273493", "0.622142", "0.61954045", "0.61948967", "0.6146341", "0.6115349", "0.596583", "0.5951703", "0.59373564", "0.5876065", "0.5872588", "0.582961", "0.5817462", "0.58139235", "0.5794319", "0.5783217", "0.57712746", "0.5753196", "0.5745496", "0.5714717", "0.5712812", "0.5693858", "0.56644213", "0.5641267", "0.56402427", "0.5639017", "0.5629126", "0.56268466", "0.56157774", "0.5613678", "0.5613119", "0.5605223", "0.5603496", "0.5599157", "0.5589051", "0.5574906", "0.5574473", "0.5570546", "0.5557409", "0.5553286", "0.55530405", "0.5551556", "0.5535466", "0.5532756", "0.5532531", "0.55139685", "0.55118906", "0.548676", "0.548676", "0.54866326", "0.5473767", "0.5469731", "0.5466494", "0.54628044", "0.54620314", "0.5457334", "0.5455755", "0.5450561", "0.54498625", "0.54413205", "0.54293007", "0.542911", "0.54283863", "0.54235244", "0.5420663", "0.5414842", "0.54059786", "0.5405663", "0.54024553", "0.54014385", "0.53971756", "0.5393004", "0.5392493", "0.5389571", "0.5387083", "0.53840584", "0.5378992", "0.5378922", "0.5374948", "0.5371419", "0.5368041", "0.5359575", "0.5356579", "0.5356579", "0.53494155", "0.53475547", "0.52824914", "0.5281908", "0.52686375", "0.52674794", "0.5265899", "0.5262354", "0.5261491" ]
0.82316995
0
Creates a new author time.
def initialize(author, time) @author = author @time = time freeze end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_time=(time)\n @create_time = time\n update_times\n end", "def create\n self[:created] = Time.now.to_s\n save\n end", "def time\n Time.parse(inner_author.date.to_s)\n end", "def create(hour, minute, second, usec = 0)\n t = date\n meth = Sequel.application_timezone == :utc ? :utc : :local\n send(meth, t.year, t.month, t.day, hour, minute, second, usec)\n end", "def create_time(); @create_time; end", "def local(*args)\n create(Time.utc(*args))\n end", "def create_timestamp\n self.created_at = Time.now\n end", "def create_timestamp\n self.created_at = Time.now\n end", "def creation_time # :nodoc:\n @creation_time.dup\n end", "def created_time\n ctime\n end", "def record_tweet(tweet_name, time)\n \n end", "def created; BoxesConsoleHelpers.simple_time(created_at) rescue \"nil\"; end", "def create(time)\n TZTime::LocalTime.new(time, @time_zone)\n end", "def set_experiment_created_at(_experiment, _time)\n raise \"Not implemented\"\n end", "def setCreatedTime(createdTime)\r\n\t\t\t\t\t@createdTime = createdTime\r\n\t\t\t\tend", "def setCreatedTime(createdTime)\r\n\t\t\t\t\t@createdTime = createdTime\r\n\t\t\t\tend", "def set_experiment_created_at(experiment, time)\n record = VanityExperiment.find_by_experiment_id(experiment.to_s) ||\n VanityExperiment.new(experiment_id: experiment.to_s)\n record.created_at ||= time\n record.save\n end", "def insert_time_entry cat_id, description ,time=nil\n entry = DB::Time_entries.find_by(finishtime: nil)\n entry.finishtime=time\n entry.save if DB::Time_entries.create(category_id: cat_id, name: description, starttime: time)\n end", "def create_record(authority:, action:, dt_stamp: QaServer::TimeService.current_time)\n create(dt_stamp: dt_stamp,\n authority: authority,\n action: action)\n end", "def touch!\n @created = Time.now.to_i\n end", "def before_create\n temp_time = Time.sl_local\n self.created_at = temp_time\n self.modified_at = temp_time\n end", "def update!(**args)\n @create_time = args[:create_time] if args.key?(:create_time)\n end", "def created_at=(_arg0); end", "def create\n @time_entry = TimeEntry.new(params[:time_entry])\n\n respond_to do |format|\n if @time_entry.save\n format.html { redirect_to @time_entry, notice: 'Time entry was successfully created.' }\n format.json { render json: @time_entry, status: :created, location: @time_entry }\n else\n format.html { render action: \"new\" }\n format.json { render json: @time_entry.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @time_entry = TimeEntry.new(params[:time_entry])\n\n respond_to do |format|\n if @time_entry.save\n format.html { redirect_to @time_entry, notice: 'Time entry was successfully created.' }\n format.json { render json: @time_entry, status: :created, location: @time_entry }\n else\n format.html { render action: \"new\" }\n format.json { render json: @time_entry.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @time_entry = TimeEntry.new(params[:time_entry])\n\n respond_to do |format|\n if @time_entry.save\n format.html { redirect_to @time_entry, notice: 'Time entry was successfully created.' }\n format.json { render json: @time_entry, status: :created, location: @time_entry }\n else\n format.html { render action: \"new\" }\n format.json { render json: @time_entry.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @time_entry = TimeEntry.new(time_entry_params)\n\n respond_to do |format|\n if @time_entry.save\n format.html { redirect_to @time_entry, notice: 'Time entry was successfully created.' 
}\n format.json { render :show, status: :created, location: @time_entry }\n else\n format.html { render :new }\n format.json { render json: @time_entry.errors, status: :unprocessable_entity }\n end\n end\n end", "def initialize\n @created_at = Time.now\n end", "def initialize(time = nil)\n # Set default values.\n @created_at = time || Event.time\n end", "def make_new_tag( time=Time.now )\n\treturn time.strftime( TAG_TIMESTAMP_FORMAT )\nend", "def time name, description: nil, mode: :nullable, policy_tags: nil\n record_check!\n\n add_field name, :time, description: description, mode: mode, policy_tags: policy_tags\n end", "def create\n @timecard = current_user.timecards.build(params[:timecard])\n\n if @timecard.save\n redirect_to timecards_path, :notice => 'Timecard was successfully created.' \n else\n render :action => \"new\"\n end\n\n end", "def now\n create(@time_zone.utc_to_local(Time.now.utc))\n end", "def creation_time\n data[:creation_time]\n end", "def enter_created; end", "def create\n @member_time = MemberTime.new(params[:member_time])\n\n respond_to do |format|\n if @member_time.save\n format.html { redirect_to @member_time, notice: 'Member time was successfully created.' }\n format.json { render json: @member_time, status: :created, location: @member_time }\n else\n format.html { render action: \"new\" }\n format.json { render json: @member_time.errors, status: :unprocessable_entity }\n end\n end\n end", "def create_new_user(github)\n user = User.new\n user.github_id = github.user.id\n user.email = github.user.email\n user.plan = 'basic'\n user.created_at = Time.now\n user.updated_at = Time.now\n user.token = github.access_token\n user.save\n flash[:info] = \"Hi friend! Let's start you out by creating your first book.\"\nend", "def create\n @timeentry = Timeentry.new(params[:timeentry])\n\n respond_to do |format|\n if @timeentry.save\n format.html { redirect_to @timeentry, notice: 'Timeentry was successfully created.' }\n format.json { render json: @timeentry, status: :created, location: @timeentry }\n else\n format.html { render action: \"new\" }\n format.json { render json: @timeentry.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @topic = Topic.new(params[:topic])\n if Topic.find_by_timeint(Topic.current_timeint(Time.now))\n @topic.timeint = Topic.add_timeint(Topic.last.timeint)\n else\n @topic.timeint = Topic.current_timeint(Time.now)\n end\n\n respond_to do |format|\n if @topic.save\n format.html { redirect_to @topic, notice: 'Topic was successfully created.' 
}\n format.json { render json: @topic, status: :created, location: @topic }\n else\n format.html { render action: \"new\" }\n format.json { render json: @topic.errors, status: :unprocessable_entity }\n end\n end\n end", "def create_time\n Convert.timestamp_to_time @grpc.create_time\n end", "def create_time\n Convert.timestamp_to_time @grpc.create_time\n end", "def create\n unless current_user.system?\n render status: 403, nothing: true and return\n end\n\n @member = Member.find(params[:member_id])\n @timecard = @member.timecards.build(timecard_params)\n\n if @timecard.save\n render nothing: true, status: :created, location: member_timecard_url(@member, @timecard)\n else\n render status: 400, nothing: true\n end\n end", "def create(filename)\n time = Time.now\n @files[filename] = time\n puts \"#{filename} was created by #{@username} at #{time}.\"\n end", "def add_tutorial(day, time, location, tutor, abbrev)\n tutor_role = unit_roles.where(\"user_id=:user_id\", user_id: tutor.id).first\n if tutor_role.nil? || tutor_role.role == Role.student\n return nil\n end\n\n Tutorial.find_or_create_by( { unit_id: id, abbreviation: abbrev } ) do |tutorial|\n tutorial.meeting_day = day\n tutorial.meeting_time = time\n tutorial.meeting_location = location\n tutorial.unit_role_id = tutor_role.id\n end\n end", "def generate_author(author_name, author_ff)\n author = Author.find_by_name(author_name)\n if !author # if it doesn't exist, add it\n generate_log(\"Generating New Author: #{author_name}\")\n author = Author.new()\n author.name = author_name\n author.ff_id = author_ff\n else\n generate_log(\"Author Exists: #{author_name}\")\n end\n author\nend", "def create_record(url, ref)\n date_time = Faker::Time.between(@now - 10, @now).utc\n Entry.create(\n url: url,\n referrer: ref,\n created_at: date_time,\n )\nend", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def 
created_date_time=(value)\n @created_date_time = value\n end", "def created_date_time=(value)\n @created_date_time = value\n end", "def create\n author = Author.create!(author_params)\n auth_token = AuthenticateAuthor.new(author.email, author.password).call\n response = { message: Message.account_created, auth_token: auth_token }\n json_response(response, :created)\n end", "def create\n @allocated_time = AllocatedTime.new(allocated_time_params)\n\n respond_to do |format|\n if @allocated_time.save\n format.html { redirect_to @allocated_time, notice: 'Allocated time was successfully created.' }\n format.json { render :show, status: :created, location: @allocated_time }\n else\n format.html { render :new }\n format.json { render json: @allocated_time.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_time(time)\n @time = time.getutc\n return self\n end", "def createAuthor(name=nil)\n params = {}\n params[:name] = name unless name.nil?\n call :createAuthor, params\n end", "def creation_date=(_); end", "def add_creator_as_member\n Membership.add_membership(created_by_id, id)\n end", "def creationtime\r\n\t\t\t`#{BITS::BITSADMIN} /getcreationtime {#{@id}}`\r\n\t\tend", "def create\n @timecard = Timecard.new(timecard_params)\n @timecard.update_attribute(:user_id, current_user.id)\n\n respond_to do |format|\n if @timecard.save\n format.html { redirect_to @timecard, notice: 'Timecard was successfully created.' }\n format.json { render :show, status: :created, location: @timecard }\n else\n format.html { render :new }\n format.json { render json: @timecard.errors, status: :unprocessable_entity }\n end\n end\n end", "def before_create; self.created_on = Time.now.utc; end", "def generate\n\t\tif self.token == nil\n\t\t\tself.token = SecureRandom.hex\n\t\tend\n\t\ta = Mashup.create! name: 'temporal'\n\t\tself.mashup_id = a.id\n\tend", "def create\n @question = Question.new(question_params)\n @question.user = current_user\n @question.time = DateTime.now()\n\n respond_to do |format|\n if @question.save\n format.html { redirect_to root_path }\n else\n format.html { render :new }\n format.json { render json: @question.errors, status: :unprocessable_entity }\n end\n end\n end", "def create(*)\n super.tap do\n __debug_sim('USER initiates submission of new entries.')\n end\n end", "def asctime() end", "def create_username_changed_timestamp\n self.update_attribute(:username_changed_at, 2.months.ago)\n end", "def creator\n \"Made by ROUINEB Hamza. 
2016\"\n end", "def create(record)\n now = Time.now\n record['created_at'] ||= now\n record['updated_at'] ||= now\n collection.insert(record)\n end", "def create_new_user\r\n touch(\"* id:'#{add}'\")\r\n end", "def create(filename)\n time = Time.now\n @files[filename] = time # Updates the \"files\" hash with the timestamp for when the file was created\n puts \"The file #{filename} was created at #{time}\"\n end", "def record_creation_timestamp(path, timestamp)\n # Hook method: Linux filesystems doesn't store creation datetime\n end", "def set_hour_created\n self.hour_created = Time.now.in_time_zone('Eastern Time (US & Canada)').hour\n end", "def create_time_request\n TimeRequest.create(\n time: [ Time.new(2000, 1, 1, 14, 0, 0, \"+00:00\").utc, Time.new(2000, 1, 1, 13, 0, 0, \"+00:00\").utc, Time.new(2000, 1, 1, 12, 0, 0, \"+00:00\").utc ].sample,\n reservation: Reservation.all.sample,\n check_in: [true, false].sample,\n status: 'pending'\n )\nend", "def find_or_create_personal_time_for(course_user)\n personal_time = personal_time_for(course_user)\n return personal_time if personal_time.present?\n\n personal_time = personal_times.new(course_user: course_user)\n reference_time = reference_time_for(course_user)\n personal_time.start_at = reference_time.start_at\n personal_time.end_at = reference_time.end_at\n personal_time.bonus_end_at = reference_time.bonus_end_at\n personal_time\n end", "def create\n create_entry\n end", "def create\n \n if params[:via] == \"email\"\n input = params[:plain]\n input.gsub!(/> /, '')\n input = input.split(/---.*\\n-.*\\n/)[1]\n input = \"---\\n-\\n\" + input\n input.gsub!(/\\n\\*\\*/, \"\\n \")\n input.gsub!(/\\n\\*/, \"\\n \")\n \n user = User.find_by_email(params[:from])\n workdate = Date.strptime(params[:subject].split(/Your Timecard for /).last,\"%A %B %d\")\n \n \n @timecard = Timecard.new(:user_id => user.id, :cardtext => input, :workdate => workdate)\n \n if @timecard.save\n logger.info \"Emailed timecard was successfully saved.\"\n redirect_to (:root)\n else\n logger.info \"Emailed timecard was not saved\"\n redirect_to (:root)\n end\n else\n @timecard = Timecard.new(params[:timecard])\n\n @timecard.user_id = current_user.id\n\n respond_to do |format|\n if @timecard.save\n format.html { redirect_to(@timecard, :notice => 'Timecard was successfully created.') }\n format.xml { render :xml => @timecard, :status => :created, :location => @timecard }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @timecard.errors, :status => :unprocessable_entity }\n end\n end\n end\n end", "def create\n @post = current_user.posts.build(post_params)\n\n if params.has_key?(:published_at)\n @post.set_time(params[:published_at])\n else\n @post.set_time(Time.now)\n end\n\n if @post.save\n render json: @post\n else\n render json: { errors: @post.errors }\n end\n end", "def create\n @class_time = ClassTime.new(class_time_params)\n @class_time.user_id = @current_user.id #the user who is currently signed in for this class\n #displays their id\n\n respond_to do |format|\n if @class_time.save\n format.html { redirect_to @class_time, notice: 'Class time was successfully created.' }\n format.json { render action: 'show', status: :created, location: @class_time }\n else\n format.html { render action: 'new' }\n format.json { render json: @class_time.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_created_by\n account.created_by = current_user.id\n account\n end" ]
[ "0.6283268", "0.6208966", "0.5957836", "0.58763534", "0.58646804", "0.5765599", "0.573955", "0.5738108", "0.56839454", "0.567565", "0.56525135", "0.56482875", "0.5640177", "0.5631206", "0.5626972", "0.5626972", "0.56126696", "0.5602776", "0.5591157", "0.5589196", "0.55754215", "0.5571758", "0.551758", "0.55027616", "0.55027616", "0.55027616", "0.5498711", "0.5484141", "0.5466865", "0.54535913", "0.54480195", "0.54464865", "0.5442366", "0.5423302", "0.54222244", "0.5414215", "0.5411464", "0.5396182", "0.5391833", "0.5391467", "0.5391467", "0.5390954", "0.5383511", "0.538257", "0.53816676", "0.53778803", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5376466", "0.5372767", "0.5362153", "0.53561324", "0.5330946", "0.5322859", "0.531743", "0.53172517", "0.53117985", "0.53058386", "0.5304239", "0.52960163", "0.5293049", "0.52929664", "0.5291516", "0.5285405", "0.5278556", "0.5278143", "0.52728015", "0.5272633", "0.525929", "0.52410954", "0.5237512", "0.5227581", "0.5225675", "0.52249706", "0.52149045", "0.52137417" ]
0.6469384
0
Redirects stderr and stdout to /dev/null.
def silence_output @orig_stderr = $stderr @orig_stdout = $stdout # redirect stderr and stdout to /dev/null $stderr = File.new('/dev/null', 'w') $stdout = File.new('/dev/null', 'w') end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stdout_to_dev_null\n $stdout = File.open('/dev/null', 'w')\n yield\n $stdout = STDOUT\n end", "def stdout_to_dev_null\n $stdout = File.open('/dev/null', 'w')\n yield\n $stdout = STDOUT\n end", "def dev_null(&block)\n orig_stdout = $stdout.dup # does a dup2() internally\n $stdout.reopen('/dev/null', 'w')\n yield\nensure\n $stdout.reopen(orig_stdout)\nend", "def silence_output\n # @orig_stderr = $stderr\n @orig_stdout = $stdout\n \n # redirect stderr and stdout to /dev/null\n # $stderr = File.new('/dev/null', 'w')\n $stdout = File.new('/dev/null', 'w')\nend", "def suppress_output\n $stderr.reopen(\"/dev/null\", \"a\")\n $stdout.reopen($stderr)\n end", "def silence_output\n # Store the original stderr and stdout in order to restore them later\n @original_stderr = $stderr\n @original_stdout = $stdout\n\n # Redirect stderr and stdout\n $stderr = File.new('/dev/null', 'w')\n $stdout = File.new('/dev/null', 'w')\nend", "def dev_null(&block)\n begin\n orig_stdout = $stdout.dup # does a dup2() internally\n $stdout.reopen('/dev/null', 'w')\n yield\n ensure\n $stdout.reopen(orig_stdout)\n end\nend", "def silence_output\n @orig_stderr = $stderr\n @orig_stdout = $stdout\n $stderr = File.new('/dev/null', 'w')\n $stdout = File.new('/dev/null', 'w')\nend", "def redirect\n $stdin.reopen '/dev/null'\n $stdout.reopen File.new(out, \"a\")\n $stderr.reopen File.new(err, \"a\")\n $stdout.sync = $stderr.sync = true\n end", "def suppress_output\n original_stdout, original_stderr = $stdout.clone, $stderr.clone\n $stderr.reopen File.new('/dev/null', 'w')\n $stdout.reopen File.new('/dev/null', 'w')\n yield\nensure\n $stdout.reopen original_stdout\n $stderr.reopen original_stderr\nend", "def suppress_output\n original_stdout = $stdout.clone\n original_stderr = $stderr.clone\n $stderr.reopen File.new('/dev/null', 'w')\n $stdout.reopen File.new('/dev/null', 'w')\n yield\nensure\n $stdout.reopen original_stdout\n $stderr.reopen original_stderr\nend", "def suppress_stdout\n original_stderr = $stderr\n original_stdout = $stdout\n $stderr = File.open(File::NULL, 'w')\n $stdout = File.open(File::NULL, 'w')\n yield\n ensure\n $stderr = original_stderr\n $stdout = original_stdout\n end", "def suppress_output(&block)\n $stdout = File.new(\"/dev/null\", \"w\")\n $stderr = File.new(\"/dev/null\", \"w\")\n result = block.call\n $stdout = STDOUT\n $stderr = STDERR\n result\n end", "def disable_stdout\n @old_stdout = STDOUT.dup\n # via Tomas Matousek, http://www.ruby-forum.com/topic/205887\n STDOUT.reopen(::RUBY_PLATFORM =~ /djgpp|(cyg|ms|bcc)win|mingw/ ? 'NUL' : '/dev/null')\n end", "def redirect_io( simulate = false )\n begin\n STDIN.reopen '/dev/null'\n rescue ::Exception\n end\n\n unless simulate\n STDOUT.reopen '/dev/null', 'a'\n STDERR.reopen '/dev/null', 'a'\n end\n end", "def disable_stdout\n @old_stdout = STDOUT.dup\n STDOUT.reopen(PLATFORM =~ /mswin/ ? 
\"NUL\" : \"/dev/null\")\n end", "def quiet\n if @suppress_output\n \">/dev/null 2>&1\"\n end\n end", "def silence_output\n # Store the original stderr and stdout in order to restore them later\n @original_stderr = $stderr\n @original_stdout = $stdout\n\n # Redirect stderr and stdout\n $stderr = File.new(log_file, 'w')\n $stdout = File.new(log_file, 'w')\nend", "def silence_stderr\n orig_stderr = $stderr.clone\n $stderr.reopen File.new('/dev/null', 'w')\n yield\n ensure\n $stderr.reopen orig_stderr\n end", "def without_stderr; end", "def try_redirect\n $stdin.reopen(Rack::App::Utils.devnull_path)\n $stdout.reopen(Rack::App::Worker::Environment.stdout)\n $stderr.reopen(Rack::App::Worker::Environment.stderr)\n $stdout.sync = $stderr.sync = true\n rescue Errno::ENOENT\n retry\n end", "def blackhole\n file('/dev/null')\n end", "def stdout_redirect(stdout = T.unsafe(nil), stderr = T.unsafe(nil), append = T.unsafe(nil)); end", "def clear_stderr\n $stderr.string = ''\n end", "def clear_console_output\n return \"\" unless @clear_console_output\n return \"2>/dev/null\" if File.exist?(\"/dev/null\") #Linux console clear\n end", "def without_stderr\n old_stderr = $stderr\n $stderr = StringIO.new\n\n yield\n ensure\n $stderr = old_stderr\n end", "def silence\n old_o, old_e = $stdout, $stderr\n $stdout = StringIO.new\n $stderr = StringIO.new\n\n yield\nensure\n $stdout = old_o if old_o\n $stderr = old_e if old_e\nend", "def silent_out\n stdout = $stdout\n $stdout = StringIO.new\n begin\n yield if block_given?\n ensure\n $stdout = stdout\n end\nend", "def system_silently(command)\n if RUBY_PLATFORM =~ /mswin32/\n null = 'NUL:'\n else\n null = '/dev/null'\n end\n \n system(\"#{command} > #{null} 2> #{null}\")\n end", "def clear_stderr\n $stderr.string = '' if $stderr.is_a?(StringIO)\n end", "def silenced\n $stdout = StringIO.new\n\n yield\n ensure\n $stdout = STDOUT\n end", "def redirect_io(logfile_name)\n begin\n ; STDIN.reopen \"/dev/null\";\n rescue ::Exception;\n end\n\n if logfile_name\n begin\n STDOUT.reopen logfile_name, \"a\"\n STDOUT.sync = true\n rescue ::Exception\n begin\n ; STDOUT.reopen \"/dev/null\";\n rescue ::Exception;\n end\n end\n else\n begin\n ; STDOUT.reopen \"/dev/null\";\n rescue ::Exception;\n end\n end\n\n begin\n ; STDERR.reopen STDOUT;\n rescue ::Exception;\n end\n STDERR.sync = true\n end", "def silence_output\n orig_stdout = $stdout\n $stdout = StringIO.new\n yield\n $stdout = orig_stdout\nend", "def clear_stderr!\n @stderr_handler.clear!\n end", "def redirect_stdio!\n inn, out, err = open(@stdin), open(@stdout, 'a+'), open(@stderr, 'a+')\n no_warn do\n $stdin = Object.const_set(:STDIN, inn)\n $stdout = Object.const_set(:STDOUT, out)\n $stderr = Object.const_set(:STDERR, err)\n end\n end", "def do_quietly?(&block)\n unless @verbose\n old_stdout = $stdout.clone\n $stdout.reopen(File.new(\"/dev/null\", \"w\"))\n begin\n block.call\n rescue Exception => e\n $stdout.reopen old_stdout\n raise e\n ensure\n $stdout = old_stdout\n end\n else\n block.call\n end\n end", "def enable_output\n $stderr = @original_stderr\n $stdout = @original_stdout\n @original_stderr = nil\n @original_stdout = nil\n `rm #{log_file} && touch #{log_file}`\nend", "def redirect_stdout\n raise IllegalStateException, \"Already redirected\" if @saved_stdout\n @saved_stdout = $stdout\n $stdout = StringIO.new\n end", "def clear_stdout\n $stdout.string = '' if $stdout.is_a?(StringIO)\n end", "def clear_stdout!\n @stdout_handler.clear!\n end", "def enable_output\n $stderr = @orig_stderr\n $stdout = @orig_stdout\n 
@orig_stderr = nil\n @orig_stdout = nil\nend", "def enable_output\n $stderr = @orig_stderr\n $stdout = @orig_stdout\n @orig_stderr = nil\n @orig_stdout = nil\nend", "def enable_output\n $stderr = @original_stderr\n $stdout = @original_stdout\n @original_stderr = nil\n @original_stdout = nil\nend", "def enable_output\n # $stderr = @orig_stderr\n $stdout = @orig_stdout\n # @orig_stderr = nil\n @orig_stdout = nil\nend", "def redirect_io(logfile_name)\n begin; STDIN.reopen \"/dev/null\"; rescue ::Exception; end\n\n if logfile_name\n begin\n STDOUT.reopen logfile_name, \"a\"\n STDOUT.sync = true\n rescue ::Exception\n begin; STDOUT.reopen \"/dev/null\"; rescue ::Exception; end\n end\n else\n begin; STDOUT.reopen \"/dev/null\"; rescue ::Exception; end\n end\n\n begin; STDERR.reopen STDOUT; rescue ::Exception; end\n STDERR.sync = true\n end", "def hide\n real_stdout = $stdout\n $stdout = StringIO.new\n yield\nensure\n $stdout = real_stdout\nend", "def refute_output(stdout=nil, stderr=nil, &block)\n StdoutAssay.refute!(stdout, :backtrace=>caller, &block) if stdout\n StderrAssay.refute!(stderr, :backtrace=>caller, &block) if stderr\n end", "def silent\n if ENV[\"RSPECQ_DEBUG\"]\n yield\n return\n end\n\n begin\n orig = $stdout.clone\n $stdout.reopen(File::NULL, \"w\")\n yield\n ensure\n $stdout.reopen(orig)\n end\n end", "def supress_stdout\n mock_io = StringIO.new\n stdout_real, $stdout = $stdout, mock_io\n if block_given?\n begin\n yield\n ensure\n $stdout = stdout_real\n end\n end\n mock_io\nend", "def clear_stdout\n $stdout.string = ''\n end", "def reset_output\n $stdout = StringIO.new\nend", "def redirect\n orig_defout = $defout\n $stdout = StringIO.new\n yield\n $stdout.string\n ensure\n $stdout = orig_defout\n end", "def enable_output\n $stdout = @original_stdout\n @original_stdout = nil\nend", "def with_redirection\n ensure_files!\n redirect_stdio!\n yield\n restore_stdio!\n rescue Exception => exception\n fatal! exception.to_s_mri\n end", "def silence!\n IO.console.raw!\n end", "def silence_stream(stream)\n old_stream = stream.dup\n stream.reopen(RUBY_PLATFORM =~ /mswin/ ? 'NUL:' : '/dev/null')\n stream.sync = true\n yield\nensure\n stream.reopen(old_stream)\nend", "def hide_output\n keep_stdout = $stdout\n keep_stderr = $stderr\n $stdout = StringIO.new\n $stderr = StringIO.new\n yield\n ensure\n $stdout = keep_stdout\n $stderr = keep_stderr\n end", "def setup_stdio_redirect\n $stdin.close\n $stdout.reopen(@context.logfile, 'w+') \n $stderr.reopen(@context.logfile, 'w+') \n end", "def silence_stream(stream)\n old_stream = stream.dup\n stream.reopen(RbConfig::CONFIG['host_os'] =~ /mswin|mingw/ ? 'NUL:' : '/dev/null')\n stream.sync = true\n yield\nensure\n stream.reopen(old_stream)\n old_stream.close\nend", "def silent_command(command)\n \"{ #{command}; } >/dev/null 2>&1\"\n end", "def stdout!(stdout)\n @stdout = stdout\n self\n end", "def streams_after(out_,err_)\n $stdout = out_\n $stderr = err_\nend", "def redirect_io\n stdout = config.options[:redirect_stdout]\n stderr = config.options[:redirect_stderr] || stdout\n append = config.options[:redirect_append]\n\n if stdout\n STDOUT.reopen stdout, (append ? \"a\" : \"w\")\n STDOUT.sync = true\n STDOUT.puts \"=== puma startup: #{Time.now} ===\"\n end\n\n if stderr\n STDERR.reopen stderr, (append ? 
\"a\" : \"w\")\n STDERR.sync = true\n if stdout != stderr then\n # no need to dupe\n STDERR.puts \"=== puma startup: #{Time.now} ===\"\n end\n end\n end", "def capture_stdout(&block)\n silence_stdout true, &block\n end", "def capture_stderr\n previous_stdout, $stdout = $stdout, StringIO.new\n yield\n $stdout.string\nensure\n $stdout = previous_stdout\nend", "def silence_stream(stream)\n old_stream = stream.dup\n stream.reopen(\"/dev/null\")\n stream.sync = true\n yield\nensure\n stream.reopen(old_stream)\nend", "def shutup\n if ARGV.verbose?\n yield\n else\n begin\n tmperr = $stderr.clone\n tmpout = $stdout.clone\n $stderr.reopen '/dev/null', 'w'\n $stdout.reopen '/dev/null', 'w'\n yield\n ensure\n $stderr.reopen tmperr\n $stdout.reopen tmpout\n end\n end\nend", "def silence_stream(stream)\n old_stream = stream.dup\n stream.reopen('/dev/null')\n stream.sync = true\n yield\nensure\n stream.reopen(old_stream)\nend", "def reroute_stdio\n $stdout = log.file\n $stderr = log.file\n end", "def stderr!(stderr)\n @stderr = stderr\n self\n end", "def nullsink(input=nil)\n inout \"nullsink\", input, nil\n end", "def silence_stderr\n silence_stream(STDERR) { yield }\n end", "def restore_stdout\n $stdout = @original_stdout unless @original_stdout.nil?\n end", "def swallow_stdout\n s = StringIO.new\n oldstd = $stdout\n $stdout = s\n yield\n return s.string\n ensure\n $stdout = oldstd\n end", "def run_quietly(command, *args)\n if !args.empty?\n args = args.flatten.map { |i| shell_escape(i) }.join(\" \")\n command = \"#{command} #{args}\"\n end\n run(\"#{command} > /dev/null 2> /dev/null\")\n end", "def silence_stream(stream)\n old_stream = stream.dup\n stream.reopen(RUBY_PLATFORM =~ /mswin/ ? 'NUL:' : '/dev/null')\n stream.sync = true\n yield\n ensure\n stream.reopen(old_stream)\n end", "def silence_stream(stream)\n old_stream = stream.dup\n stream.reopen(RbConfig::CONFIG['host_os'] =~ /mswin|mingw/ ? 'NUL:' : '/dev/null')\n stream.sync = true\n yield\n ensure\n stream.reopen(old_stream)\n old_stream.close\n end", "def clean_fd\n ObjectSpace.each_object(IO) do |io|\n unless [STDIN, STDOUT, STDERR].include?(io)\n begin\n unless io.closed?\n io.close\n end\n rescue ::Exception\n end\n end\n end\n end", "def silence_streams(*streams)\n on_hold = streams.collect { |stream| stream.dup }\n streams.each do |stream|\n stream.reopen(os_mac? ? 
'/dev/null' : 'NUL:')\n stream.sync = true\n end\n yield\nensure\n streams.each_with_index do |stream, i|\n stream.reopen(on_hold[i])\n end\nend", "def stderr_or_stdout\n return stderr unless stderr.empty?\n stdout\n end", "def redirect_stdout?\n !!@redirect_stdout\n end", "def without_stdout_buffering\n sync, $stdout.sync = $stdout.sync, true\n yield\n ensure\n $stdout.sync = sync\n end", "def redirect_output!; end", "def shell_out!(*command_args)\n cmd = shell_out(*command_args)\n cmd.error!\n cmd\n end", "def quiet_stdout\n around(:example) do |example|\n capture_stdout true\n example.run\n capture_stdout false\n end\nend", "def shutup!\n $stdout = @hole\n $stderr = @hole\n end", "def restore_stderr\n $stderr = @original_stderr unless @original_stderr.nil?\n end", "def fake_stderr\n original_stderr = $stderr\n $stderr = StringIO.new\n yield\n $stderr.string\n ensure\n $stderr = original_stderr\n end", "def catch_stdio(&block)\n original_streams = [$stdin, $stdout, $stderr]\n cleanup = -> { $stdin, $stdout, $stderr = original_streams }\n stdin.clear\n stdout.clear\n stderr.clear\n $stdin = stdin\n $stdout = stdout\n $stderr = stderr\n if block\n begin\n block.call\n ensure\n cleanup.call\n end\n else\n @cleaners << cleanup\n end\n self\n end", "def silent(*what)\n return unless block_given?\n\n begin\n _stdout, $stdout = $stdout, StringIO.new if what.include?(:stdout)\n _stderr, $stderr = $stderr, StringIO.new if what.include?(:stderr)\n\n yield\n ensure\n $stdout = _stdout if what.include?(:stdout)\n $stderr = _stderr if what.include?(:stderr)\n end\nend", "def spec_helper_silence_stdout( &block )\n spec_helper_silence_stream( $stdout, &block )\nend", "def stderr\n run unless ran?\n\n @stderr\n end", "def streams_before\n out_ = $stdout\n err_ = $stderr\n $stdout = StringIO.new\n $stderr = StringIO.new\n return out_,err_\nend", "def teardown\n\t\t$stderr = @original_stderr\n\t\t$stdout = @original_stdout\n\tend", "def teardown\n\t\t$stderr = @original_stderr\n\t\t$stdout = @original_stdout\n\tend", "def setup_logging(log_path)\n log_path ||= '/dev/null'\n $stdin.reopen '/dev/null'\n $stdout.reopen(log_path, 'a')\n $stderr.reopen $stdout\n $stdout.sync = true\n end", "def teardown\n $stdout = @old_stdout\n end", "def teardown\n $stdout = @old_stdout\n end", "def teardown\n $stdout = @old_stdout\n end", "def flush!\n $stdout.flush\n end" ]
[ "0.781298", "0.781298", "0.7732118", "0.768003", "0.7649619", "0.7530993", "0.7511257", "0.75067335", "0.7145987", "0.71257967", "0.7005881", "0.68246317", "0.6683403", "0.66793615", "0.6647123", "0.6643985", "0.66033554", "0.6582603", "0.65248203", "0.65067434", "0.63578904", "0.63541573", "0.63178515", "0.6266964", "0.62626225", "0.6248822", "0.61953735", "0.61574525", "0.6126434", "0.61116713", "0.610723", "0.6091959", "0.60816866", "0.59818554", "0.59808797", "0.59681153", "0.5958378", "0.5943404", "0.59376794", "0.5908166", "0.5874215", "0.5874215", "0.58664626", "0.5848048", "0.5811867", "0.580191", "0.57872146", "0.5772312", "0.57652026", "0.5711477", "0.57047856", "0.56847614", "0.5654658", "0.56417584", "0.5634771", "0.5625559", "0.5584576", "0.558298", "0.55637515", "0.55517775", "0.55396307", "0.55010885", "0.54900914", "0.5488744", "0.5485789", "0.5443189", "0.5441025", "0.5425375", "0.5415188", "0.53877395", "0.53570384", "0.53460044", "0.53298867", "0.5290459", "0.5264901", "0.52495277", "0.5241225", "0.5240679", "0.5237886", "0.52037287", "0.5197241", "0.5190645", "0.5188755", "0.5167412", "0.51603013", "0.5158489", "0.51575094", "0.5140492", "0.51173997", "0.50785965", "0.507774", "0.5066467", "0.5035806", "0.50278497", "0.50278497", "0.50250167", "0.5020999", "0.5020999", "0.5020999", "0.499975" ]
0.77290887
3
Replace stdout and stderr so anything else is output correctly.
def enable_output $stderr = @orig_stderr $stdout = @orig_stdout @orig_stderr = nil @orig_stdout = nil end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable_output\n $stderr = @original_stderr\n $stdout = @original_stdout\n @original_stderr = nil\n @original_stdout = nil\nend", "def enable_output\n # $stderr = @orig_stderr\n $stdout = @orig_stdout\n # @orig_stderr = nil\n @orig_stdout = nil\nend", "def stdout_redirect(stdout = T.unsafe(nil), stderr = T.unsafe(nil), append = T.unsafe(nil)); end", "def streams_after(out_,err_)\n $stdout = out_\n $stderr = err_\nend", "def enable_output\n $stderr = @original_stderr\n $stdout = @original_stdout\n @original_stderr = nil\n @original_stdout = nil\n `rm #{log_file} && touch #{log_file}`\nend", "def refute_output(stdout=nil, stderr=nil, &block)\n StdoutAssay.refute!(stdout, :backtrace=>caller, &block) if stdout\n StderrAssay.refute!(stderr, :backtrace=>caller, &block) if stderr\n end", "def redirect_stdio!\n inn, out, err = open(@stdin), open(@stdout, 'a+'), open(@stderr, 'a+')\n no_warn do\n $stdin = Object.const_set(:STDIN, inn)\n $stdout = Object.const_set(:STDOUT, out)\n $stderr = Object.const_set(:STDERR, err)\n end\n end", "def reroute_stdio\n $stdout = log.file\n $stderr = log.file\n end", "def supress_stdout\n mock_io = StringIO.new\n stdout_real, $stdout = $stdout, mock_io\n if block_given?\n begin\n yield\n ensure\n $stdout = stdout_real\n end\n end\n mock_io\nend", "def reset_output\n $stdout = StringIO.new\nend", "def capture_stderr\n previous_stdout, $stdout = $stdout, StringIO.new\n yield\n $stdout.string\nensure\n $stdout = previous_stdout\nend", "def enable_output\n $stdout = @original_stdout\n @original_stdout = nil\nend", "def redirect_stdout\n raise IllegalStateException, \"Already redirected\" if @saved_stdout\n @saved_stdout = $stdout\n $stdout = StringIO.new\n end", "def all_output\n all_stdout << all_stderr\n end", "def re_stdout\n @logdev.re_stdout\n self\n end", "def without_stderr; end", "def suppress_output\n original_stdout = $stdout.clone\n original_stderr = $stderr.clone\n $stderr.reopen File.new('/dev/null', 'w')\n $stdout.reopen File.new('/dev/null', 'w')\n yield\nensure\n $stdout.reopen original_stdout\n $stderr.reopen original_stderr\nend", "def suppress_output\n original_stdout, original_stderr = $stdout.clone, $stderr.clone\n $stderr.reopen File.new('/dev/null', 'w')\n $stdout.reopen File.new('/dev/null', 'w')\n yield\nensure\n $stdout.reopen original_stdout\n $stderr.reopen original_stderr\nend", "def suppress_stdout\n original_stderr = $stderr\n original_stdout = $stdout\n $stderr = File.open(File::NULL, 'w')\n $stdout = File.open(File::NULL, 'w')\n yield\n ensure\n $stderr = original_stderr\n $stdout = original_stdout\n end", "def suppress_output\n $stderr.reopen(\"/dev/null\", \"a\")\n $stdout.reopen($stderr)\n end", "def silence_output\n # Store the original stderr and stdout in order to restore them later\n @original_stderr = $stderr\n @original_stdout = $stdout\n\n # Redirect stderr and stdout\n $stderr = File.new('/dev/null', 'w')\n $stdout = File.new('/dev/null', 'w')\nend", "def fake_stderr\n original_stderr = $stderr\n $stderr = StringIO.new\n yield\n $stderr.string\n ensure\n $stderr = original_stderr\n end", "def clear_stderr\n $stderr.string = '' if $stderr.is_a?(StringIO)\n end", "def re_stderr\n @logdev.re_stderr\n self\n end", "def silence_output\n @orig_stderr = $stderr\n @orig_stdout = $stdout\n \n # redirect stderr and stdout to /dev/null\n $stderr = File.new('/dev/null', 'w')\n $stdout = File.new('/dev/null', 'w')\nend", "def silence_output\n @orig_stderr = $stderr\n @orig_stdout = $stdout\n $stderr = File.new('/dev/null', 'w')\n 
$stdout = File.new('/dev/null', 'w')\nend", "def output\n @stdout + \"\\n\" + @stderr\n end", "def restore_stdout\n $stdout = @original_stdout unless @original_stdout.nil?\n end", "def swallow_stdout\n s = StringIO.new\n oldstd = $stdout\n $stdout = s\n yield\n return s.string\n ensure\n $stdout = oldstd\n end", "def clear_stderr\n $stderr.string = ''\n end", "def redirect_io\n stdout = config.options[:redirect_stdout]\n stderr = config.options[:redirect_stderr] || stdout\n append = config.options[:redirect_append]\n\n if stdout\n STDOUT.reopen stdout, (append ? \"a\" : \"w\")\n STDOUT.sync = true\n STDOUT.puts \"=== puma startup: #{Time.now} ===\"\n end\n\n if stderr\n STDERR.reopen stderr, (append ? \"a\" : \"w\")\n STDERR.sync = true\n if stdout != stderr then\n # no need to dupe\n STDERR.puts \"=== puma startup: #{Time.now} ===\"\n end\n end\n end", "def streams_before\n out_ = $stdout\n err_ = $stderr\n $stdout = StringIO.new\n $stderr = StringIO.new\n return out_,err_\nend", "def redirect_java_err\n err = Java.java.io.ByteArrayOutputStream.new\n original_err = Java.java.lang.System.err\n begin\n printStream = Java.java.io.PrintStream\n print = printStream.new(err)\n Java.java.lang.System.setErr(print)\n yield\n ensure\n Java.java.lang.System.setErr(original_err)\n end\n err.toString\n end", "def redirect\n orig_defout = $defout\n $stdout = StringIO.new\n yield\n $stdout.string\n ensure\n $stdout = orig_defout\n end", "def redirect\n $stdin.reopen '/dev/null'\n $stdout.reopen File.new(out, \"a\")\n $stderr.reopen File.new(err, \"a\")\n $stdout.sync = $stderr.sync = true\n end", "def teardown\n\t\t$stderr = @original_stderr\n\t\t$stdout = @original_stdout\n\tend", "def teardown\n\t\t$stderr = @original_stderr\n\t\t$stdout = @original_stdout\n\tend", "def silence_output\n # @orig_stderr = $stderr\n @orig_stdout = $stdout\n \n # redirect stderr and stdout to /dev/null\n # $stderr = File.new('/dev/null', 'w')\n $stdout = File.new('/dev/null', 'w')\nend", "def extract_rye\n @stdout = original.stdout.map(&:to_s).join(\"\\n\")\n @stderr = original.stderr.map(&:to_s).join(\"\\n\")\n end", "def ok\n $stdout = @orig_stdout\n $stderr = @orig_stderr\n end", "def stdout!(stdout)\n @stdout = stdout\n self\n end", "def disable_stdout\n @old_stdout = STDOUT.dup\n # via Tomas Matousek, http://www.ruby-forum.com/topic/205887\n STDOUT.reopen(::RUBY_PLATFORM =~ /djgpp|(cyg|ms|bcc)win|mingw/ ? 'NUL' : '/dev/null')\n end", "def silence_output\n # Store the original stderr and stdout in order to restore them later\n @original_stderr = $stderr\n @original_stdout = $stdout\n\n # Redirect stderr and stdout\n $stderr = File.new(log_file, 'w')\n $stdout = File.new(log_file, 'w')\nend", "def teardown\n $stdout = @old_stdout\n end", "def teardown\n $stdout = @old_stdout\n end", "def teardown\n $stdout = @old_stdout\n end", "def fake_stdout\n original_stdout = $stdout\n $stdout = StringIO.new\n yield\n $stdout.string\n rescue RSpec::Expectations::ExpectationNotMetError => e\n @exception = e\n raise\n ensure\n $stdout = original_stdout\n end", "def error_io\n exit_code == -1 ? 
$stderr : $stdout\n end", "def restore_stderr\n $stderr = @original_stderr unless @original_stderr.nil?\n end", "def extract_shellout\n @stdout = original.stdout\n @stderr = original.stderr\n end", "def capture_stds(&block)\n real_stderr, $stderr = $stderr, StringIO.new\n real_stdout, $stdout = $stdout, StringIO.new\n yield\n [ $stderr.string, $stdout.string ]\n ensure\n $stdout, $stderr = real_stdout, real_stderr\n end", "def silence\n old_o, old_e = $stdout, $stderr\n $stdout = StringIO.new\n $stderr = StringIO.new\n\n yield\nensure\n $stdout = old_o if old_o\n $stderr = old_e if old_e\nend", "def capture_stdout\n # The output stream must be an IO-like object. In this case we capture it in\n # an in-memory IO object so we can return the string value. You can assign any\n # IO object here.\n previous_stdout, $stdout = $stdout, StringIO.new\n yield\n $stdout.string\nensure\n # Restore the previous value of stderr (typically equal to STDERR).\n $stdout = previous_stdout\nend", "def hide\n real_stdout = $stdout\n $stdout = StringIO.new\n yield\nensure\n $stdout = real_stdout\nend", "def without_stderr\n old_stderr = $stderr\n $stderr = StringIO.new\n\n yield\n ensure\n $stderr = old_stderr\n end", "def capture_error_output\n puts \"Capturing error output...\"\n\n @stderr.flush\n error_catching_thread = Thread.new {\n @out = \"\"\n thread = Thread.start do\n @stderr.each_line do |line|\n @out << line\n end\n end\n }\n @stderr.sync = false\n\n line = \"\"\n while c = @stdout.read(1)\n line += c\n if line =~ /\\(fcsh\\)/\n puts \"Done....\"\n return @out\n end\n next if c != \"/n\"\n\n puts \"(out) \" + line.inspect if $DEBUG\n if line =~ /Nothing has changed/\n puts \"Nothing has changed\" if $DEBUG\n return @out\n end\n\n if line =~ /Files changed:/\n puts \"Filed changed:\" if $DEBUG\n return @out\n end\n\n if line =~ /Error: (.*)/\n raise CompileError.new(line)\n end\n\n if line =~ /.+\\.swf/\n puts \"\" if $DEBUG\n return @out\n end\n\n line = \"\"\n end\n end", "def capture_stdout\n old_stdout = $stdout\n $stdout = StringIO.new\n\n yield\n\n $stdout.string\nensure\n $stdout = old_stdout\nend", "def surpress_stdout\n stdout_orig = $stdout\n $stdout = @temp_file\n begin\n yield\n ensure\n $stdout = stdout_orig\n end\n end", "def stderr!(stderr)\n @stderr = stderr\n self\n end", "def redirect_stdout\n if Capybara::Chromefoil.mri?\n yield\n else\n begin\n prev = STDOUT.dup\n $stdout = @write_io\n STDOUT.reopen(@write_io)\n yield\n ensure\n STDOUT.reopen(prev)\n $stdout = STDOUT\n prev.close\n end\n end\n end", "def disable_stdout\n @old_stdout = STDOUT.dup\n STDOUT.reopen(PLATFORM =~ /mswin/ ? \"NUL\" : \"/dev/null\")\n end", "def stdouts; end", "def hide_output\n keep_stdout = $stdout\n keep_stderr = $stderr\n $stdout = StringIO.new\n $stderr = StringIO.new\n yield\n ensure\n $stdout = keep_stdout\n $stderr = keep_stderr\n end", "def capture_stdout(&blk)\n old = $stdout\n $stdout = fake = StringIO.new\n blk.call\n fake.string\nensure\n $stdout = old\nend", "def capture_stdout(&blk)\n old = $stdout\n $stdout = fake = StringIO.new\n blk.call\n fake.string\nensure\n $stdout = old\nend", "def capture_stdout(&blk)\n old = $stdout\n $stdout = fake = StringIO.new\n blk.call\n fake.string\nensure\n $stdout = old\nend", "def with_redirection\n ensure_files!\n redirect_stdio!\n yield\n restore_stdio!\n rescue Exception => exception\n fatal! 
exception.to_s_mri\n end", "def setup_stdio_redirect\n $stdin.close\n $stdout.reopen(@context.logfile, 'w+') \n $stderr.reopen(@context.logfile, 'w+') \n end", "def silence_output\n orig_stdout = $stdout\n $stdout = StringIO.new\n yield\n $stdout = orig_stdout\nend", "def silent_out\n stdout = $stdout\n $stdout = StringIO.new\n begin\n yield if block_given?\n ensure\n $stdout = stdout\n end\nend", "def mock_stderr\n @original_stderr ||= $stderr\n $stderr = StringIO.new\n end", "def clear_stderr!\n @stderr_handler.clear!\n end", "def safe_each\n [$stderr, $stdout].each { |io| safely { yield io } }\n end", "def clear_stdout\n $stdout.string = '' if $stdout.is_a?(StringIO)\n end", "def log_standard_outputs\n [STDOUT, STDERR].each do |output|\n (class << output; self; end).class_eval do\n alias __write__ write\n\n def write string\n Wmiirc::LOG << string\n __write__ string\n end\n\n alias << write\n end\n end\n end", "def try_redirect\n $stdin.reopen(Rack::App::Utils.devnull_path)\n $stdout.reopen(Rack::App::Worker::Environment.stdout)\n $stderr.reopen(Rack::App::Worker::Environment.stderr)\n $stdout.sync = $stderr.sync = true\n rescue Errno::ENOENT\n retry\n end", "def mock_stdout\n @original_stdout ||= $stdout\n $stdout = StringIO.new\n end", "def stderr(command, data)\n # called when the process writes to STDERR\n end", "def all_stderr\n registered_commands.each(&:stop)\n\n registered_commands.each_with_object(\"\") { |e, a| a << e.stderr }\n end", "def `(cmd)\n old_execute(cmd + \" 2>&1\")\nend", "def stderr_or_stdout\n return stderr unless stderr.empty?\n stdout\n end", "def stderr\n run unless ran?\n\n @stderr\n end", "def capture_stderr(&block)\n old = $stderr\n $stderr = fake = StringIO.new\n yield\n fake.string\n ensure\n $stderr = old\n end", "def format_redirect_stderr(cmd, target = NULL_OUTPUT_NAME)\n return cmd + \" 2>#{target}\"\n end", "def out_clean\n out = $stdout\n $stdout = @stdout\n out.string\n end", "def map_error_output(filename)\r\n\tDir.mkdir(\"#{settings.root}/tmp\") unless File.exists?(\"#{settings.root}/tmp\")\r\n\tfile = File.new(\"#{settings.root}/tmp/#{filename}\", 'a+')\r\n\tfile.sync = true\r\n\t$stderr.reopen file\r\nend", "def stderr; end", "def stderr; end", "def stderr; end", "def stderr; end", "def restore_stdout\n content = nil\n if @saved_stdout\n @saved_stdout.flush\n content = $stdout.string\n $stdout = @saved_stdout\n @saved_stdout = nil\n end\n content\n end", "def capture_output\n begin\n require 'stringio'\n $o_stdout, $o_stderr = $stdout, $stderr\n $stdout, $stderr = StringIO.new, StringIO.new\n yield\n {:stdout => $stdout.string, :stderr => $stderr.string}\n ensure\n $stdout, $stderr = $o_stdout, $o_stderr\n end\nend", "def clear_stdout!\n @stdout_handler.clear!\n end", "def capture_output &block\n old_stdout = $stdout\n test_stdout = StringIO.new\n $stdout = test_stdout\n block.call\n test_stdout.string\nensure\n $stdout = old_stdout\nend", "def fatal!(message)\n # Not using safe_each in case that caused an error.\n safely { $stdout.reopen(@stdout, 'a+'); $stdout.puts message }\n safely { $stderr.reopen(@stderr, 'a+'); $stderr.puts message }\n exit 1\n end", "def capture_stdout\n # The output stream must be an IO-like object. In this case we capture it in\n # an in-memory IO object so we can return the string value. 
You can assign any\n # IO object here.\n stdout = StringIO.new\n previous_stdout, $stdout = $stdout, stdout\n previous_stderr, $stderr = $stderr, stdout\n yield\n stdout.string\n \n rescue Exception => msg\n puts(\"Error: #{stdout.string}\")\n raise msg\n \n ensure\n # Restore the previous value of stderr (typically equal to STDERR).\n $stdout = previous_stdout\n $stderr = previous_stderr\n end", "def capture_stdout(&block)\n original_stdout = $stdout\n $stdout = fake = StringIO.new\n begin\n yield\n ensure\n $stdout = original_stdout\n end\n fake.string\nend", "def capture_stdout(&block)\n original_stdout = $stdout\n $stdout = fake = StringIO.new\n begin\n yield\n ensure\n $stdout = original_stdout\n end\n fake.string\nend", "def stdout; end" ]
[ "0.7068447", "0.7055271", "0.69756573", "0.6945191", "0.67468333", "0.6693161", "0.66484386", "0.6645464", "0.6613646", "0.65677196", "0.6555749", "0.6540932", "0.6477912", "0.6471895", "0.646281", "0.64559585", "0.6444204", "0.6433146", "0.64129597", "0.63809717", "0.6335418", "0.6333024", "0.6328634", "0.6324666", "0.6317956", "0.63096184", "0.6307047", "0.6300101", "0.6285397", "0.6284351", "0.6256491", "0.62293166", "0.621984", "0.6219352", "0.62074536", "0.6192826", "0.6192826", "0.61920625", "0.61705834", "0.6167616", "0.61518896", "0.6141869", "0.61315495", "0.61232823", "0.61232823", "0.61232823", "0.6114622", "0.6105124", "0.608613", "0.6081246", "0.6072257", "0.60547197", "0.6037823", "0.60162216", "0.600075", "0.5953654", "0.59417045", "0.5932677", "0.5930194", "0.5928563", "0.5919831", "0.59067357", "0.5902211", "0.5900313", "0.5900313", "0.5900313", "0.5885297", "0.58834165", "0.58628535", "0.58530307", "0.585293", "0.58419657", "0.5839163", "0.5826584", "0.58231485", "0.58226615", "0.58157843", "0.57852614", "0.5784173", "0.57784367", "0.5776691", "0.57355815", "0.57316554", "0.5730902", "0.57246906", "0.57246727", "0.5712345", "0.5712345", "0.5712345", "0.5712345", "0.5700663", "0.56831515", "0.56629586", "0.563377", "0.5600881", "0.5590086", "0.5581832", "0.5581832", "0.5565944" ]
0.71049917
1
Removes any existing console handler, and adds a ColorConsoleHandler.
def replace_console_logger(options = {}) logger = options[:logger] name = options.fetch(:outputter, 'color-console') level = case options.delete(:level) when String, Symbol then Log4r::LNAMES.index(options[:level].to_s.upcase) end log = logger ? Log4r::Logger[logger] : Log4r::Logger.root log = Log4r::Logger.new(logger) unless log # Remove any existing console handler Log4r::Logger.each_logger do |l| l.outputters.each do |o| l.remove(o.name) if o.is_a?(Log4r::StdoutOutputter) end end # Add a ColorConsoleHandler out = Log4rLogger::ColorConsoleOutputter.new(name, options) log.add out # Set the log level log.level = level if level end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_console_logger(options = {})\n logger = options.fetch(:logger, '')\n level = options[:level]\n format = options.fetch(:format, JavaUtilLogger::RubyFormatter::DEFAULT_FORMAT)\n\n # Remove any existing console handler\n l = Java::JavaUtilLogging::Logger.getLogger(logger)\n l.getHandlers.each do |h|\n case h\n when Console::JavaUtilLogger::ColorConsoleHandler\n return\n when Java::JavaUtilLogging::ConsoleHandler\n l.removeHandler(h)\n end\n end\n\n # Add a ColorConsoleHandler\n h = JavaUtilLogger::ColorConsoleHandler.new(format)\n if lbls = options[:level_labels]\n h.formatter.level_labels.merge!(lbls)\n end\n l.addHandler(h)\n\n # Set the log level\n case level\n when Symbol, String\n l.level = Java::JavaUtilLogging::Level.const_get(level.upcase.intern)\n when Java::JavaUtilLogging::Level\n l.level = level\n end\n end", "def cleanup\n\t\t# If we had previously registered a console dispatcher with the console,\n\t\t# deregister it now.\n\t\tremove_console_dispatcher('DbFun')\n\tend", "def process_console\r\n process(console)\r\n ensure\r\n console.flush\r\n end", "def close_console\n self.destroy\n end", "def set_console(input = $stdin, output = $stdout)\n @console = HighLine.new(input, output)\n end", "def clear_stderr!\n @stderr_handler.clear!\n end", "def clear_debug_console\n @debugscr.clrtoeol(0...@lines) if Rails.env.development? && @debug\n end", "def clear_stdout!\n @stdout_handler.clear!\n end", "def add_default_handlers\n # Puts (for debugging)\n @handlers[\"#{NAMESPACE}.puts\"] = Proc.new { |dialog, *arguments|\n puts(*arguments.map { |argument| argument.inspect })\n }\n RESERVED_NAMES << \"#{NAMESPACE}.puts\"\n\n # Error channel (for debugging)\n @handlers[\"#{NAMESPACE}.error\"] = Proc.new { |dialog, type, message, backtrace|\n Utils.log_error(type + ': ' + message, {:language => 'javascript', :backtrace => backtrace})\n }\n RESERVED_NAMES << \"#{NAMESPACE}.error\"\n end", "def callback_handler_remove\n rl_callback_handler_remove\n @rl_callback_handler = nil\n end", "def extend_console(name, care = true, required = true)\n if care\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions <<\n \"#{ANSI[:GRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions <<\n \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def extend_console(name, enabled = true, required = true)\n if name.match(/^#/) && required\n required = false\n end\n if enabled\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions << \"#{ANSI[:LGRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions << \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def extend_console(name, care = true, required = true)\n if care\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions << \"#{ANSI[:GRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions << \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def extend_console(name, care = true, required = true)\n if care\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions << \"#{ANSI[:GRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions << \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def extend_console(name, care = true, 
required = true)\n if care\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions << \"#{ANSI[:GRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions << \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def extend_console(name, care = true, required = true)\n if care\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions << \"#{ANSI[:GRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions << \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def extend_console(name, care = true, required = true)\n if care\n require name if required\n yield if block_given?\n $console_extensions << \"#{ANSI[:GREEN]}#{name}#{ANSI[:RESET]}\"\n else\n $console_extensions << \"#{ANSI[:GRAY]}#{name}#{ANSI[:RESET]}\"\n end\nrescue LoadError\n $console_extensions << \"#{ANSI[:RED]}#{name}#{ANSI[:RESET]}\"\nend", "def reset_colors\n @color_output ||= true\n\n # Build the default colors\n Term::ANSIColorHI.coloring = color_output\n c = Term::ANSIColorHI\n @color_app_info = c.intense_white + c.bold\n @color_app_exe = c.intense_green + c.bold\n @color_command = c.intense_yellow\n @color_description = c.intense_white\n @color_parameter = c.intense_cyan\n @color_usage = c.intense_black + c.bold\n \n @color_error_word = c.intense_black + c.bold\n @color_error_name = c.intense_red + c.bold\n @color_error_description = c.intense_white + c.bold\n \n @color_bold = c.bold\n @color_reset = c.reset\n @screen_clear = \"\\e[H\\e[2J\"\n end", "def reset_ui\n\t\tconsole.unset_log_source\n\t\tconsole.reset_ui\n\tend", "def reset_ui\n\t\tconsole.unset_log_source\n\t\tconsole.reset_ui\n\tend", "def reset\n @color_schemes ||= {}\n @color_schemes.clear\n\n new(:default, :levels => {\n :info => :green,\n :warn => :yellow,\n :error => :red,\n :fatal => [:white, :on_red]\n })\n end", "def register_signal_handlers\n at_exit { shutdown }\n log! 
\"registered at_exit shutdown hook (instead of signal handlers)\"\n end", "def <<(command_handler)\n command_handler.commands.each do |command|\n @commands[command] = command_handler\n end\n end", "def add_handler(new_handler)\n @handlers.push(new_handler)\n end", "def with_console\n begin\n console = attach\n yield console\n ensure\n console.close if console\n end\n end", "def reset_handlers\n @handlers = nil\n end", "def console_polyfill\n <<~JS\n var debugConsole = console;\n var console = { history: [] };\n ['error', 'log', 'info', 'warn'].forEach(function (level) {\n console[level] = function () {\n var argArray = Array.prototype.slice.call(arguments);\n if (argArray.length > 0) {\n argArray[0] = '[SERVER] ' + argArray[0];\n }\n console.history.push({level: level, arguments: argArray});\n };\n });\n JS\n end", "def clear\n @handlers.clear\n end", "def console\n puts 'Entering debug console.'\n if RUBY_VERSION == '2.0.0'\n require 'byebug'\n byebug\n else\n require 'ruby-debug'\n Debugger.start\n debugger\n end\n puts 'Leaving debug console.'\n end", "def clearConsole(sender)\n @viewController.clearConsole(sender)\n end", "def end!\n @color = @@colors[:red]\n end", "def cleanup\n STDERR.puts \"cleanup called\"\n if @console\n @console.dispose\n System.gc\n end\n end", "def remove_channel_open_handler( type, id )\n @channel_open_handlers[ type ][ id-1 ] = nil\n end", "def clearConsole()\n system \"clear\" or system \"cls\"\nend", "def clearConsole()\n system \"clear\" or system \"cls\"\nend", "def register_signal_handlers\n trap(\"QUIT\") { stop }\n trap(\"INT\") { @stop ? stop! : stop }\n trap(\"TERM\") { stop! }\n end", "def add_handler(handler)\n @handlers << handler\n end", "def colorize(*args)\n shell.set_color(*args)\n end", "def register_signal_handlers\r\n\t\t\ttrap('TERM') { shutdown! }\r\n\t\t\ttrap('INT') { shutdown! }\r\n\r\n\t\t\tbegin\r\n\t\t\t\ttrap('QUIT') { shutdown }\r\n\t\t\trescue ArgumentError\r\n\t\t\tend\r\n\t\tend", "def set_colors\n if color_output\n @c_app_info = @color_app_info\n @c_app_exe = @color_app_exe\n @c_command = @color_command\n @c_description = @color_description\n @c_parameter = @color_parameter\n @c_usage = @color_usage\n \n @c_error_word = @color_error_word\n @c_error_name = @color_error_name\n @c_error_description = @color_error_description\n \n @c_bold = @color_bold\n @c_reset = @color_reset\n else\n @c_app_info, @c_app_exe, @c_command, @c_description,\n @c_parameter, @c_usage, @c_bold, @c_reset, @c_error_word,\n @c_error_name, @c_error_description = [\"\"]*12\n end\n end", "def close\n $stdout.puts if @debug\n @leds = nil\n end", "def add_handler(handler)\n h, options = *(handler.is_a?(Array) ? 
handler : [handler, {}])\n name = coerce_handler(h)\n global_opts = { output: @output, config: @config }\n opts = global_opts.merge(options)\n ready_handler = name.new(opts)\n @ready_handlers << ready_handler\n end", "def set_colors\n if @color_output \n @c_app_info = @color_app_info\n @c_app_exe = @color_app_exe\n @c_command = @color_command\n @c_description = @color_description\n @c_parameter = @color_parameter\n @c_usage = @color_usage\n \n @c_error_word = @color_error_word\n @c_error_name = @color_error_name\n @c_error_description = @color_error_description\n \n @c_bold = @color_bold\n @c_reset = @color_reset\n else\n @c_app_info, @c_app_exe, @c_command, \n @c_description, @c_parameter, @c_usage, \n @c_bold, @c_reset, @c_error_word, \n @c_error_name, @c_error_description = [\"\"]*11\n end\n end", "def console\n @console ||= set_console\n @console\n end", "def register_signal_handlers!\n trap('TERM') { shutdown }\n trap('INT') { shutdown }\n end", "def std_colors\n FFI::NCurses.use_default_colors\n # 2018-03-17 - changing it to ncurses defaults\n FFI::NCurses.init_pair(0, FFI::NCurses::BLACK, -1)\n FFI::NCurses.init_pair(1, FFI::NCurses::RED, -1)\n FFI::NCurses.init_pair(2, FFI::NCurses::GREEN, -1)\n FFI::NCurses.init_pair(3, FFI::NCurses::YELLOW, -1)\n FFI::NCurses.init_pair(4, FFI::NCurses::BLUE, -1)\n FFI::NCurses.init_pair(5, FFI::NCurses::MAGENTA, -1)\n FFI::NCurses.init_pair(6, FFI::NCurses::CYAN, -1)\n FFI::NCurses.init_pair(7, FFI::NCurses::WHITE, -1)\n # ideally the rest should be done by application\n #FFI::NCurses.init_pair(8, FFI::NCurses::WHITE, -1)\n #FFI::NCurses.init_pair(9, FFI::NCurses::BLUE, -1)\n FFI::NCurses.init_pair(10, FFI::NCurses::BLACK, FFI::NCurses::CYAN)\n FFI::NCurses.init_pair(12, FFI::NCurses::BLACK, FFI::NCurses::BLUE)\n FFI::NCurses.init_pair(13, FFI::NCurses::BLACK, FFI::NCurses::MAGENTA)\n\n FFI::NCurses.init_pair(14, FFI::NCurses::WHITE, FFI::NCurses::CYAN)\n=begin\n FFI::NCurses.init_pair(8, FFI::NCurses::WHITE, FFI::NCurses::BLUE)\n FFI::NCurses.init_pair(9, FFI::NCurses::BLUE, FFI::NCurses::BLUE)\n FFI::NCurses.init_pair(10, FFI::NCurses::BLACK, FFI::NCurses::GREEN)\n FFI::NCurses.init_pair(11, FFI::NCurses::BLACK, FFI::NCurses::YELLOW)\n FFI::NCurses.init_pair(12, FFI::NCurses::BLACK, FFI::NCurses::BLUE)\n FFI::NCurses.init_pair(13, FFI::NCurses::BLACK, FFI::NCurses::MAGENTA)\n FFI::NCurses.init_pair(14, FFI::NCurses::BLACK, FFI::NCurses::CYAN)\n FFI::NCurses.init_pair(15, FFI::NCurses::BLACK, FFI::NCurses::WHITE)\n=end\n end", "def remove_handler(handler)\n @ready_handlers.delete(handler)\n end", "def handle_signal(signal)\n case signal.chop #remove new line in the end\n when SIGNALS[:close] #when user going to close the console\n close_console\n when SIGNALS[:clear] #when user going to clear eval stack\n Console.clear_eval\n :continue\n end\n end", "def handle_signal(signal)\n case signal.chop #remove new line in the end\n when SIGNALS[:close] #when user going to close the console\n close_console\n when SIGNALS[:clear] #when user going to clear eval stack\n Console.clear_eval\n :continue\n end\n end", "def write(message)\n messages << remove_ansi_colors(message)\n end", "def reset_colors\n @color_output = false\n\n #Term::ANSIColor.coloring = true\n c = Term::ANSIColor\n @color_app_info = c.intense_white + c.bold\n @color_app_exe = c.intense_green + c.bold\n @color_command = c.intense_yellow\n @color_description = c.intense_white\n @color_parameter = c.intense_cyan\n @color_usage = c.intense_black + c.bold\n \n @color_error_word = 
c.intense_black + c.bold\n @color_error_name = c.intense_red + c.bold\n @color_error_description = c.intense_white + c.bold\n \n @color_bold = c.bold\n @color_reset = c.reset\n end", "def init_curses\n # signal(SIGINT, finish)\n\n Curses.init_screen\n Curses.raw\n Curses.nonl\n #Curses.cbreak\n Curses.noecho\n Curses.curs_set(0)\n Curses.ESCDELAY = 10\n Curses.start_color\n Curses.init_pair(1, Curses::COLOR_WHITE, Curses::COLOR_BLUE);\n\n @screen = Curses.stdscr\n\n @screen.scrollok(true)\n @screen.keypad(true)\n end", "def closed!\n @color = @@colors[:lightcyan]\n end", "def curses_menu_finalize\n @screenshot = capture_screenshot\n super\n end", "def set_logger_callback\n @log_callback = Proc.new do |level, msg|\n @logger.send(level, msg)\n end\n\n Binding.setopt_log_handler(@mongocrypt, @log_callback)\n end", "def method_missing(method_name, *arguments, &block)\n if @@color_codes.keys.include? method_name.to_sym\n self.class.create_colors\n self.send(method_name.to_s)\n else\n super\n end\n end", "def add_command(name, handler)\n @commands[name] = handler\n end", "def add_message_handler(&block)\n @message_handlers << block\n end", "def add_finalizer(shell_id)\n ObjectSpace.define_finalizer(self, self.class.finalize(shell_id, service))\n end", "def set_end_handler(handler)\n @end_handler = handler\n end", "def reset_screen_clearing\n @clear_screen = false\n @clear_screen_code = \"\\e[H\\e[2J\"\n end", "def reset_signal_handlers\n Signal.list_trappable.each_key do |signal|\n begin\n prev_handler = trap(signal, DEFAULT)\n if prev_handler != DEFAULT\n @previous_signal_handlers[signal] = prev_handler\n end\n rescue ArgumentError\n # Signal cannot be trapped; ignore it.\n end\n end\n trap('HUP', IGNORE)\n PhusionPassenger.call_event(:after_installing_signal_handlers)\n end", "def set_end_handler(method)\n @end_handler = method\n end", "def set_console\n @console = Console.find(params[:id])\n end", "def set_loggers_format\n [@logger, @logger_stderr].each do |logger|\n logger.formatter = proc do |severity, _datetime, progname, msg|\n # If the message already has control characters, don't colorize it\n keep_original_color = msg.include? 
\"\\u001b\"\n message = \"[#{Time.now.utc.strftime('%F %T')} (PID #{$PROCESS_ID} / TID #{Thread.current.object_id})] #{severity.rjust(5)} - [ #{progname} ] - \"\n message << \"#{msg}\\n\" unless keep_original_color\n LEVELS_MODIFIERS[severity.downcase.to_sym].each do |modifier|\n message = message.send(modifier)\n end\n message << \"#{msg}\\n\" if keep_original_color\n message\n end\n end\n end", "def displayRemoveAllEventHandlers _obj, _args\n \"_obj displayRemoveAllEventHandlers _args;\" \n end", "def catch_stdio(&block)\n original_streams = [$stdin, $stdout, $stderr]\n cleanup = -> { $stdin, $stdout, $stderr = original_streams }\n stdin.clear\n stdout.clear\n stderr.clear\n $stdin = stdin\n $stdout = stdout\n $stderr = stderr\n if block\n begin\n block.call\n ensure\n cleanup.call\n end\n else\n @cleaners << cleanup\n end\n self\n end", "def hook_up_parser_to_screen(parser, screen)\n parser.configure_callbacks do |config|\n config.handle_print do |code|\n screen.print code\n end\n\n config.handle_csi_dispatch do |sequence|\n self.class.logger.debug \"csi_dispatch sequence: #{sequence.inspect}\"\n sequence or next\n\n if sequence.capname == 'sgr' && sequence.param_name == 'normal'\n screen.pen.bold = false\n elsif sequence.capname == 'bold'\n screen.pen.bold = true\n elsif sequence.capname == 'smcup'\n screen.save_cursor\n screen.alternate_screen_buffer\n elsif sequence.capname == 'rmcup'\n screen.save_cursor\n screen.normal_screen_buffer\n elsif sequence.capname == 'elr'\n screen.erase_in_line\n else\n self.class.logger.debug \"unhandled csi_dispatch sequence: #{sequence.inspect}\"\n end\n end\n\n config.handle_esc_dispatch do |sequence|\n self.class.logger.debug \"esc_dispatch sequence: #{sequence.inspect}\"\n self.class.logger.debug \"unhandled esc_dispatch sequence: #{sequence.inspect}\"\n end\n\n config.handle_execute do |sequence|\n self.class.logger.debug \"execute sequence: #{sequence.inspect}\"\n sequence or next\n\n if sequence.capname == 'cr'\n screen.pen.move_left full: true\n elsif sequence.capname == 'nl'\n screen.pen.move_down\n elsif sequence.long_name == 'backspace'\n screen.pen.move_left\n else\n self.class.logger.debug \"unhandled execute sequence: #{sequence.inspect}\"\n end\n end\n end\n end", "def register_signal_handlers\n trap(\"TERM\") { shutdown }\n trap(\"INT\") { shutdown }\n trap('QUIT') { shutdown } unless defined? JRUBY_VERSION\n end", "def remove_channel_handler(channel, handler)\n handlers = @channels[channel]\n handlers.delete(handler) unless handlers.nil?\n end", "def reload\n reload_msg = '# Reloading the console...'\n puts CodeRay.scan(reload_msg, :ruby).term\n Pry.save_history\n exec('rake console')\n end", "def push_console(text)\n \n @console_list.unshift(text)\n @console_list.uniq!\n @console_list.pop if @console_list.size >= 10\n end", "def set_fg\n STDOUT.write \"\\033[38;5;#{to_xterm}m\"\n end", "def register_handler\n @register_handler ||= Justdi::RegisterHandler\n end", "def log_handler=(v)\n if [String, IO].include?(v.class)\n @log_handler = ::Logger.new(v)\n else\n for m in [:debug, :info, :warn, :error, :fatal]\n unless v.respond_to?(m)\n error \"Logger #{v} don't respond to #{m}. Aborting!\"\n return\n end\n end\n @log_handler = v\n end\n end", "def register_signal_handlers\n trap('TERM') { shutdown! }\n trap('INT') { shutdown! }\n\n begin\n trap('QUIT') { shutdown }\n rescue ArgumentError\n warn \"Signals TERM and/or QUIT not supported.\"\n end\n\n log! 
\"Registered signals\"\n end", "def log_handler\n @log_handler ||= ::Logger.new(STDOUT)\n end", "def register_signal_handlers\n trap('TERM') { shutdown }\n trap('INT') { shutdown }\n trap('QUIT') { shutdown }\n trap 'SIGHUP', 'IGNORE'\n end", "def color_fix\n cli = HighLine.new\n ft = HighLine::ColorScheme.new do |cs|\n cs[:headline] = [ :bold, :yellow, :on_black ]\n cs[:horizontal_line] = [ :bold, :yellow ]\n cs[:headline_1] = [ :bold, :black, :on_yellow ]\n cs[:horizontal_line_1] = [ :yellow ]\n cs[:warning] = [ :bold, :red, :on_white ]\n cs[:menu_choice] = [ :bold, :light_blue, :on_red]\n cs[:menu_line] = [ :bold, :light_blue]\n end\n HighLine.color_scheme = ft\nend", "def method_missing(method, *args, &block)\n super unless term_colorizer_methods.include? method\n self.class.send(:define_method, method) do\n str = self\n str = add_normal_color(str, method) if color_methods.include? method\n str = add_bright_color(str, method) if bright_color_methods.include? method\n str = add_bg_color(str, method) if bg_color_methods.include? method\n str = add_underline(str) if \"underline\".eql? method.to_s\n str = add_strikethrough_effect(str) if \"strikethrough\".eql? method.to_s\n str = str + \"\\e[0m\" unless str.end_with? \"\\e[0m\"\n str\n end and self.send(method, *args)\n end", "def set_commands\n super\n @commands['clear'] = ConsoleCommand_Clear.new(self)\n @commands['confirm'] = ConsoleCommand_Confirm.new(self)\n end", "def console; end", "def console; end", "def destroy\n @color_scheme.destroy\n respond_to do |format|\n format.html { redirect_to admin_color_schemes_url, notice: 'Color scheme was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def console\n puts \"`heroku #{current_command}` has been removed. Please use: `heroku run` instead.\"\n puts \"For more information, please see:\"\n puts \" * https://devcenter.heroku.com/articles/one-off-dynos\"\n puts \" * https://devcenter.heroku.com/articles/rails3#console\"\n puts \" * https://devcenter.heroku.com/articles/console-bamboo\"\n end", "def clear_commands\n @@commands = HELP_COMMAND.dup\n end", "def register_signal_handlers\n trap('TERM') { shutdown! }\n trap('INT') { shutdown! }\n\n trap('QUIT') { shutdown }\n\n log.info \"Registered signals\"\n end", "def reset!\n @backtrace_filters = {}\n @backtrace_silencers = []\n @levels = { nil => :info }\n @appenders = []\n\n # If a DelegatingFormatter has already been set up replace its\n # formatter, otherwise create a new one.\n #\n if @formatter\n @formatter.formatter = StandardFormatter.new\n else\n @formatter = DelegatingFormatter.new(StandardFormatter.new)\n end\n end", "def reset\n STDOUT.write \"\\033[0m\"\n end", "def reset_color_scheme\n self.color_scheme = nil\n end", "def color(text, color, mode_options = {}) # :doc:\n return text unless colorize_logging\n color = self.class.const_get(color.upcase) if color.is_a?(Symbol)\n mode = mode_from(mode_options)\n clear = \"\\e[#{MODES[:clear]}m\"\n \"#{mode}#{color}#{text}#{clear}\"\n end", "def listen\n # if the user resizes the screen we redraw it to fit the new dimensions\n Console.set_console_resized_hook! 
do\n draw\n end\n\n # create an interaction object to handle user input\n interaction = Interaction.new\n\n # call draw here because interaction blocks until it gets input\n draw\n\n # loop over user input (individual keypresses)\n interaction.loop do |key|\n @last_key = key\n if key == \"q\" then\n interaction.quit!\n end\n draw\n end\n end", "def rails_console\n say 'Loading Rails console...'\n Tk.update\n require \"#{Dir.pwd}/config/boot\"\n if File.exists?(\"#{Dir.pwd}/config/application.rb\")\n Object.const_set :APP_PATH, File.expand_path(\"#{Dir.pwd}/config/application\")\n require APP_PATH\n require 'rails/console/app'\n require 'rails/console/helpers'\n if defined?(Rails::ConsoleMethods)\n self.class.include Rails::ConsoleMethods\n end\n ::Rails.application.require_environment!\n else\n [\"#{Dir.pwd}/config/environment\", 'console_app', 'console_with_helpers'].each {|e| require e }\n end\n say 'Rails console loaded!'\n end", "def add_colors\n\tNcurses.start_color\n\tcolors = %w[RED BLUE GREEN MAGENTA CYAN YELLOW]\n\tcolors.each { |color|\n\t\teval \"Ncurses.init_color( Ncurses::COLOR_#{color}, #{rand(0..1000)}, #{rand(0..1000)}, #{rand(0..1000)} )\"\n\t}\n\t#Ncurses.init_pair( PAIR_NUMBER, BORDER_LINE_COLOR, BORDER_COLOR)\n\trandom_color = eval \"Ncurses::COLOR_#{colors.sample}\"\n\tNcurses.init_pair(2, random_color, Ncurses::COLOR_RED)\n\tNcurses.init_pair(3, random_color, Ncurses::COLOR_BLUE)\n\tNcurses.init_pair(4, random_color, Ncurses::COLOR_GREEN)\n\tNcurses.init_pair(5, random_color, Ncurses::COLOR_MAGENTA)\n\tNcurses.init_pair(6, random_color, Ncurses::COLOR_CYAN)\n\tNcurses.init_pair(7, random_color, Ncurses::COLOR_YELLOW)\nend", "def clear_screen\r\n RUBY_PLATFORM =~ /cygwin|mswin|mingw|bccwin|wince|emx/ ? system(\"cls\") : system(\"clear\")\r\n end", "def clear_history()\n if @handle.ptr == nil\n raise \"this is disposed\"\n end\n Native.RunEditor_clear_history(@handle.ptr)\n end", "def RemoveBatchConverterEventsHandler(arg0)\n ret = _invoke(1610744290, [arg0], [VT_BYREF | VT_DISPATCH])\n @lastargs = WIN32OLE::ARGV\n ret\n end", "def set_console\n @console = Console.find(params[:id])\n end", "def reset\n # color is enabled by default, can be turned of by switch --no-color\n Term::ANSIColor.coloring = true\n end", "def console\n @console ||= Howzit::ConsoleLogger.new(options[:log_level])\n end" ]
[ "0.6482822", "0.519442", "0.5058755", "0.49304107", "0.4911241", "0.48777553", "0.48175898", "0.47757718", "0.4648921", "0.46487862", "0.46147546", "0.46146566", "0.46029788", "0.46029788", "0.46029788", "0.46029788", "0.46029788", "0.45920813", "0.45699403", "0.45699403", "0.4566266", "0.45246604", "0.44855645", "0.4483297", "0.44758236", "0.44752014", "0.44668877", "0.44524518", "0.44326052", "0.4398454", "0.43828958", "0.43754074", "0.43497837", "0.4349301", "0.4349301", "0.43187404", "0.43130437", "0.42797002", "0.4274325", "0.42653236", "0.42643374", "0.42434865", "0.42221138", "0.4187704", "0.41818184", "0.41816905", "0.41586462", "0.41431233", "0.41431233", "0.41338494", "0.41243663", "0.41192374", "0.40958276", "0.40908718", "0.4085654", "0.4072869", "0.4067512", "0.40554562", "0.4054755", "0.4052973", "0.40318465", "0.40281138", "0.4023924", "0.40165463", "0.4007648", "0.40007922", "0.39919925", "0.3985967", "0.3985206", "0.3976194", "0.39680573", "0.39633933", "0.3959032", "0.39565736", "0.39504534", "0.39477402", "0.3941606", "0.3941206", "0.3938899", "0.39339805", "0.392653", "0.3921676", "0.3921676", "0.39155906", "0.39034832", "0.38955325", "0.3886406", "0.388535", "0.388413", "0.38791516", "0.38744602", "0.3873034", "0.386791", "0.38658866", "0.38643166", "0.38632104", "0.3861385", "0.38612008", "0.38529667", "0.38470778" ]
0.6661214
0
GET /incidentfiles GET /incidentfiles.json
def index @incidentfiles = Incidentfile.all end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show\n @treq = Treq.find(params[:id])\n @treq_files = @treq.treq_files.all\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @treq }\n end\n end", "def index\n @incidentattachments = Incidentattachment.all\n @incident = Incident.all\n end", "def set_incidentfile\n @incidentfile = Incidentfile.find(params[:id])\n end", "def files\n @files=get_endpoint('extra').keys\n end", "def index\n @ticket_files = TicketFile.all\n end", "def discover_files\n authorize! :create, resource_class\n respond_to do |f|\n f.json do\n render json: file_locator.to_h\n end\n end\n end", "def index\n @file_versions = FileVersion.all\n\n render json: @file_versions\n end", "def show\n @meeting = Meeting.find(params[:id])\n @json_from_file = 0\n for file in @meeting.file.attachments\n if file.content_type == 'application/json'\n download_file_from_s3('smartmeetingsbelieving', \"./tmp/\" + file.filename.to_s(), file.filename.to_s())\n @json_from_file = File.read(\"tmp/\" + file.filename.to_s())\n end\n end\n end", "def server_get_file(server, path)\n request(\n :path => \"containers/#{server.id}/files\",\n :params => {\n :path => path\n },\n :disable_body_extraction => true\n ).get(:body)\n end", "def index\n @inciting_incidents = IncitingIncident.all\n render json: @inciting_incidents\n end", "def index\n @agent_import_files = AgentImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @agent_import_files }\n end\n end", "def get_file(file_id)\n\tputs \"Getting file: \" + file_id\n\tresponse = request_get('/api/partner/file/' + file_id)\n\tputs response.body\nend", "def file_data\n @client.get_file @file_url\n end", "def files\n db = Database.find(params[:id])\n @files = Dir.entries(db.path)\n @files.delete_if{|f| !f.include?'.dat'}\n @results = []\n @files.each do |entry|\n @results << {:name=>entry,:version=>db.version}\n end\n respond_to do |format|\n format.html\n format.json { render json: @results }\n end\n end", "def index\n @event_import_files = EventImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @event_import_files }\n end\n end", "def create\n @incidentfile = Incidentfile.new(incidentfile_params)\n\n respond_to do |format|\n if @incidentfile.save\n format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully created.' 
}\n format.json { render :show, status: :created, location: @incidentfile }\n else\n format.html { render :new }\n format.json { render json: @incidentfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @event_import_files = EventImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @event_import_files }\n end\n end", "def index\n @inventory_files = InventoryFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @inventory_files }\n end\n end", "def index\n remove_empty_incident\n @incidents = @project.incidents\n respond_with @incidents\n end", "def files\n results\n rescue ApiStruct::EntityError\n result\n end", "def get_file_status(file_path)\n response = HTTParty.get(\"https://#{accountName}.azuredatalakestore.net\" +\n \"/webhdfs/v1/#{file_path}?op=GETFILESTATUS\", {\n body: \"grant_type=client_credentials&client_id=#{clientId}\"+\n \"&client_secret=#{clientSecret}\"+\n \"&resource=https%3A%2F%2Fmanagement.azure.com%2F\",\n headers: {\n \"Authorization\" => \"Bearer #{bearerToken}\",\n \"Accept\" => \"*/*\",\n \"Cache-Control\" => 'no-cache',\n \"Host\" => \"#{accountName}.azuredatalakestore.net\",\n \"Connection\" => 'keep-alive',\n \"cache-control\" => 'no-cache'\n },\n verify: true,\n })\n\n return JSON.parse response.read_body\n end", "def file_get(id)\n response = get('FileService.getFile', id)\n end", "def json_files\n file_list '**/*.json'\n end", "def show\n @indexed_file = IndexedFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @indexed_file }\n end\n end", "def parse_files_json(file)\n\n files_hash = convert_json(b2_list_file_names(file))\n files = {}\n\n files_hash[\"files\"].each do |file_hash|\n files[file_hash[\"fileName\"]] = file_hash[\"fileId\"]\n end\n\n return files\n\nend", "def get_files_list(request)\n http_request = request.to_http_info(@api_client.config)\n make_request(http_request, :GET, 'FilesList')\n end", "def list_files\n source_dir = Path.new(params[:source_dir])\n if params.has_key?(:show_catalogues)\n show_catalogues = params[:show_catalogues]\n else\n show_catalogues = false\n end\n if params[:ext].present?\n file_type = params[:ext]\n else\n file_type = nil\n end\n render json: source_dir.files(file_type: file_type, show_catalogues: show_catalogues)\n end", "def download_incident_list(file_name)\n raw_file_name = File.expand_path(\"~/Downloads/incidents.csv\")\n if File.exists?(raw_file_name)\n puts \"Deleted old Downloads file #{raw_file_name}\"\n FileUtils.rm(raw_file_name)\n end\n \n puts \"Request report from #{incident_list_url}\"\n `open \"#{incident_list_url}\"`\n until File.exists?(raw_file_name) do\n puts \"Waiting for file to download\"\n sleep(1)\n end\n\n if File.exists?(file_name)\n puts \"Deleted old incidents/raw file #{file_name}\"\n FileUtils.rm(file_name)\n end\n\n `mv #{raw_file_name} #{file_name}`\n puts \"#{raw_file_name} written to #{file_name}\"\n end", "def list_files\n User.sync_files!(@context)\n files = user_real_files(params, @context)\n\n if unsafe_params[:limit] && unsafe_params[:offset]\n files = files.limit(unsafe_params[:limit]).offset(unsafe_params[:offset])\n end\n\n search_string = params[:search_string].presence || \"\"\n\n result = files.eager_load(:license, user: :org).\n where(\"nodes.name LIKE ?\", \"%#{search_string}%\").\n order(id: :desc).map do |file|\n describe_for_api(file, 
unsafe_params[:describe])\n end.compact\n\n render json: unsafe_params[:offset]&.zero? ? { objects: result, count: result.length } : result\n end", "def show\n @capa = Capa.find(params[:id])\n @capa_files = @capa.capa_files.all\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @capa }\n end\n end", "def index\n @casefiles = Casefile.all\n end", "def incident_list(statuspage_id)\n request :method => :get,\n :url => @url + 'incident/list/' + statuspage_id\n end", "def fetch_file(file_path)\n client.get_file(file_path)\n end", "def files\n info[\"Files\"].to_a\n end", "def show\n @incident = Incident.find(params[:id])\n\n render json: @incident\n end", "def read\n status = 200\n\n # File path\n fpath = filepathById params[:id]\n\n if nil == fpath\n # File description does not exists\n result = {status: 'error', message: 'Bad request'}\n status = 400\n elsif File.exists? fpath\n result = {content: File.read(fpath)}\n else\n result = {content: ''}\n end\n render json: result.to_json, status: status\n end", "def show\n puts params[:id]\n @file_versions = FileVersion.where(versioned_file_id: params[:id]) \n #@file_versions = FileVersion.find(:versioned_file_id => params[:versioned_file_id])\n render json: @file_versions\n end", "def show\n\n @observation = Observation.find(params[:id])\n @coral = Coral.find(params[:coral_id])\n\n @files = Dir.glob(\"app/assets/images/tagged_outlines_thumbs/*\")\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @observation }\n end\n end", "def files_get(opts = {})\n files_get_with_http_info(opts)\n end", "def get_fileset\n\n filesets = batched_get( { id: params[:id] } )\n if filesets.empty?\n render_json_fileset_response(:not_found )\n else\n render_json_fileset_response(:ok, fileset_transform( filesets ) )\n end\n end", "def index\n @event_import_file = EventImportFile.where(id: params[:event_import_file_id]).first\n if @event_import_file\n @event_import_results = @event_import_file.event_import_results.page(params[:page])\n else\n @event_import_results = EventImportResult.page(params[:page])\n end\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @event_import_results }\n format.txt\n end\n end", "def show\n @file_info = FileInfo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_info }\n end\n end", "def getPullRequestFiles(id)\n getFilesFromDiff(getPullRequestDiff(id))\n end", "def show\n @resource_file = ResourceFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @resource_file }\n end\n end", "def files_get_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: FilesApi.files_get ...\"\n end\n # resource path\n local_var_path = \"/files\"\n\n # query parameters\n query_params = {}\n query_params[:'start'] = opts[:'start'] if !opts[:'start'].nil?\n query_params[:'limit'] = opts[:'limit'] if !opts[:'limit'].nil?\n query_params[:'include_deleted_files'] = opts[:'include_deleted_files'] if !opts[:'include_deleted_files'].nil?\n query_params[:'sort'] = opts[:'sort'] if !opts[:'sort'].nil?\n\n # header parameters\n header_params = {}\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n auth_names = [ 'access_token' ]\n response = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => 
form_params,\n :body => post_body,\n :auth_names => auth_names)\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: FilesApi#files_get\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return response\n end", "def files\n return get_result('files')\n end", "def get_file_summary(file_path)\n response = HTTParty.get(\"https://#{accountName}.azuredatalakestore.net\" +\n \"/webhdfs/v1/#{file_path}?op=GETCONTENTSUMMARY\", {\n body: \"grant_type=client_credentials&client_id=#{clientId}\"+\n \"&client_secret=#{clientSecret}\"+\n \"&resource=https%3A%2F%2Fmanagement.azure.com%2F\",\n headers: {\n \"Authorization\" => \"Bearer #{bearerToken}\",\n \"Accept\" => \"*/*\",\n \"Cache-Control\" => 'no-cache',\n \"Host\" => \"#{accountName}.azuredatalakestore.net\",\n \"Connection\" => 'keep-alive',\n \"cache-control\" => 'no-cache'\n },\n verify: true,\n })\n \n return JSON.parse response.read_body\n end", "def show\n render json: @inciting_incident\n end", "def incidentfile_params\n params.require(:incidentfile).permit(:incident_id, :filetype, :state)\n end", "def index\n @money_arrival_files = MoneyArrivalFile.all\n end", "def show\n @meeting = Meeting.find(params[:meeting_id])\n @userfiles = @meeting.userfiles\n end", "def index\n @file_infos = FileInfo.includes(:component).all\n end", "def show\n @incident = Incident.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @incident }\n end\n end", "def show_uploaded_file\n @organization = Organization.unscoped.find(params[:id])\n @registered_applications = RegisteredApp.all\n if params[:registered_app_id].present?\n @audit_trails = @organization.audit_trails.where(\"fk_registered_app_id =? \", params[:registered_app_id]).order(:createddate)\n else\n if @registered_applications.first.present?\n @audit_trails = @organization.audit_trails.where(\"fk_registered_app_id =?\", @registered_applications.first.id).order(:createddate)\n end\n end\n respond_to do |format|\n format.html\n format.js {\n result = render_to_string(partial: \"/admin/organizations/uploaded_files\", locals: { audit_trails: @audit_trails })\n render json: { html: result }\n }\n end\n end", "def uploaded_file\n initalize_breadcrumb(\"Uploaded File(s)\", uploadedfile_datauploaders_path)\n currentUser = current_user.id\n @uploadedFiles = UserFileMapping.where(:user_id =>currentUser )\n respond_with(@uploadedFiles)\n end", "def file(id)\n # Requires authorization\n raise PutioError::AuthorizationRequired if authentication_required!\n\n response = make_get_call('/files/%i' % [id])\n response.download = download(id)\n\n response\n end", "def show\n @userfile = Userfile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @userfile }\n end\n end", "def get_firmware_files\n response = request(:get, \"/devmgr/v2/firmware/cfw-files/\")\n status(response, 200, 'Failed to get uploaded firmware file list')\n JSON.parse(response.body)\n end", "def index\n @incidentcategories = Incidentcategory.all\n json_response(@incidentcategories)\n end", "def index\n @title = \"User uploaded files\"\n get_files(params)\n end", "def update\n respond_to do |format|\n if @incidentfile.update(incidentfile_params)\n format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @incidentfile }\n else\n format.html { render :edit }\n format.json { render json: @incidentfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @userfiles = Userfile.all\n end", "def index\n @attachfiles = Attachfile.all\n end", "def show\n @file_record = FileRecord.find(params[:id])\n @file_versions = @file_record.file_versions.all\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_record }\n format.json { render json: {file_record: @file_record,file_versions: @file_versions} }\n end\n end", "def get_file_listing\n execute!(drive.files.list).data\n end", "def index\n @cfiles = Cfile.all\n end", "def retrieve_cloud_files(files); end", "def show\n @bulletin_file = BulletinFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @bulletin_file }\n end\n end", "def file(file_type, id)\n query = \"/?client_id=#{@client_id}&#{id}\"\n path = \"#{file_type}/#{__method__}.to_s\"\n resp = http_get(path, query)\n end", "def get_the_individual_file_to_be_processed\n # p \"individual file selection\"\n files = GetFiles.get_all_of_the_filenames(@project.freecen_files_directory, @project.file_range)\n files\n end", "def show\n @report_file = ReportFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @report_file }\n end\n end", "def show\n @action_file = ActionFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @action_file }\n end\n end", "def index\n @expenses_files = ExpensesFile.all\n end", "def files(trading_partner_id, filename)\n scope 'default'\n url = URI.parse(@api_url + '/files/')\n\n File.open(filename) do |f|\n req = Net::HTTP::Post::Multipart.new url.path,\n 'file' => UploadIO.new(f, 'application/EDI-X12', filename),\n 'trading_partner_id' => trading_partner_id\n req['Authorization'] = \"Bearer #{default_scope.token}\"\n req['User-Agent'] = user_agent\n\n @response = Net::HTTP.start(url.host, url.port) do |http|\n http.request(req)\n end\n end\n\n JSON.parse(@response.body)\n end", "def get_intent_to_file\n with_monitoring_and_error_handling do\n raw_response = perform(:get, '')\n EVSS::IntentToFile::IntentToFilesResponse.new(raw_response.status, raw_response)\n end\n end", "def show\n @product_bulletin = ProductBulletin.find(params[:id])\n @bulletin_files = @product_bulletin.bulletin_files.all\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product_bulletin }\n end\n end", "def show\n @test_file = TestFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @test_file }\n end\n end", "def get_template_files(opts)\n path = \"/template/files/#{opts[:template_id]}\"\n if opts[:file_type]\n path = path + \"?file_type=#{opts[:file_type]}\"\n end\n if opts[:get_url]\n separator = opts[:file_type].nil? ? '?' 
: '&'\n path = path + \"#{separator}get_url=#{opts[:get_url]}\"\n end\n\n get(path)\n end", "def index\n @backup_files = BackupFile.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @backup_files }\n end\n end", "def files\n result = form.select_files.map do |label, id|\n { id: id, text: label }\n end\n render json: result\n end", "def index\n @workfiles = Workfile.all\n end", "def audit\n render :json=>@generic_file.audit\n end", "def object_files\n @object_files = get_files(params[:id]) if controller_name == 'catalog' || controller_name == 'image_viewer'\n end", "def files\n result = form.select_files.map { |label, id| { id: id, text: label } }\n render json: result\n end", "def index\n @commit_filepaths = CommitFilepath.all\n end", "def files_info(params = {})\n fail ArgumentError, \"Required arguments 'file' missing\" if params['file'].nil?\n response = @session.do_post \"#{SCOPE}.info\", params\n Slack.parse_response(response)\n end", "def files\n result = form.select_files.map do |label, id|\n { id: id, text: label }\n end\n render json: result\n end", "def files\n result = form.select_files.map do |label, id|\n { id: id, text: label }\n end\n render json: result\n end", "def get_files(query_obj=nil,with_nested_resources=false)\n uri = URI.parse(@uri + \"/Files\")\n results = get(uri,query_obj,with_nested_resources)\n end", "def tracked_files; end", "def index\n Dir[\"#{@base_path}/*.json\"].map{|p| File.basename(p)}\n end", "def index\n @file_upload_attachments = FileUploadAttachment.all\n end", "def index\n @other_files = OtherFile.all\n end", "def index\n @resource_import_files = ResourceImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n end\n end", "def run\n super\n \n require_enrichment\n\n body = http_get_body \"#{_get_entity_name}/+CSCOU+/../+CSCOE+/files/file_list.json?path=/sessions\"\n \n if body =~ /\\/\\/\\/sessions/\n _create_linked_issue \"cisco_asa_path_traversal_cve_2018_0296\", { \"proof\" => body }\n end\n\n end", "def index\n @asset_files = current_user.asset_files\n end", "def get_files\n return @files if @files.present?\n raise \"No user token present\" unless access[:user_token].present?\n @files ||= get(proviso_url + \"files\")\n @files.presence || raise(\"No files available\")\n end", "def signature_request_files(opts)\n path = \"/signature_request/files/#{opts[:signature_request_id]}\"\n if opts[:file_type]\n path = path + \"?file_type=#{opts[:file_type]}\"\n end\n\n if opts[:get_url]\n separator = opts[:file_type].nil? ? '?' : '&'\n path = path + \"#{separator}get_url=#{opts[:get_url]}\"\n elsif opts[:get_data_uri]\n separator = opts[:file_type].nil? ? '?' : '&'\n path = path + \"#{separator}get_data_uri=#{opts[:get_data_uri]}\"\n end\n\n get(path)[:body]\n end", "def show\n @up_file = UpFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @up_file }\n end\n end", "def api(path)\n OodAppkit.files.api(path: path).to_s\n end" ]
[ "0.63998556", "0.63369095", "0.6141543", "0.609517", "0.6063753", "0.60319984", "0.59550637", "0.59429413", "0.59327", "0.59200436", "0.59195596", "0.5896033", "0.586666", "0.58649904", "0.58425474", "0.58373386", "0.5836639", "0.58086884", "0.5807181", "0.57908887", "0.5773914", "0.5756622", "0.5749288", "0.57449067", "0.5737331", "0.57188636", "0.57147515", "0.56913966", "0.5690293", "0.5660659", "0.5658157", "0.56569904", "0.5635158", "0.5634111", "0.56274873", "0.5627444", "0.5613872", "0.56124634", "0.5610243", "0.560884", "0.5608335", "0.56026596", "0.5580366", "0.5573004", "0.5555359", "0.5554968", "0.55503505", "0.55488086", "0.5544454", "0.55312824", "0.55298847", "0.55195117", "0.55062073", "0.5495715", "0.54939234", "0.54877365", "0.54876906", "0.5483682", "0.5479308", "0.5474783", "0.54687935", "0.54615587", "0.5461443", "0.5455374", "0.54540336", "0.5448901", "0.54446155", "0.5435001", "0.5427503", "0.5422105", "0.54208964", "0.541561", "0.54110986", "0.5400003", "0.5392572", "0.5386107", "0.53760123", "0.5367638", "0.53674877", "0.5364588", "0.5359838", "0.5356548", "0.53559625", "0.5353444", "0.53523225", "0.535213", "0.53433007", "0.53433007", "0.5336118", "0.533504", "0.5324984", "0.5322667", "0.53177327", "0.5312282", "0.53117865", "0.5309692", "0.53091496", "0.530609", "0.5302099", "0.5297065" ]
0.7626278
0
GET /incidentfiles/1 GET /incidentfiles/1.json
def show end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index\n @incidentfiles = Incidentfile.all\n end", "def show\n @treq = Treq.find(params[:id])\n @treq_files = @treq.treq_files.all\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @treq }\n end\n end", "def get_file(file_id)\n\tputs \"Getting file: \" + file_id\n\tresponse = request_get('/api/partner/file/' + file_id)\n\tputs response.body\nend", "def set_incidentfile\n @incidentfile = Incidentfile.find(params[:id])\n end", "def show\n @indexed_file = IndexedFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @indexed_file }\n end\n end", "def file_get(id)\n response = get('FileService.getFile', id)\n end", "def server_get_file(server, path)\n request(\n :path => \"containers/#{server.id}/files\",\n :params => {\n :path => path\n },\n :disable_body_extraction => true\n ).get(:body)\n end", "def show\n puts params[:id]\n @file_versions = FileVersion.where(versioned_file_id: params[:id]) \n #@file_versions = FileVersion.find(:versioned_file_id => params[:versioned_file_id])\n render json: @file_versions\n end", "def read\n status = 200\n\n # File path\n fpath = filepathById params[:id]\n\n if nil == fpath\n # File description does not exists\n result = {status: 'error', message: 'Bad request'}\n status = 400\n elsif File.exists? fpath\n result = {content: File.read(fpath)}\n else\n result = {content: ''}\n end\n render json: result.to_json, status: status\n end", "def show\n @meeting = Meeting.find(params[:id])\n @json_from_file = 0\n for file in @meeting.file.attachments\n if file.content_type == 'application/json'\n download_file_from_s3('smartmeetingsbelieving', \"./tmp/\" + file.filename.to_s(), file.filename.to_s())\n @json_from_file = File.read(\"tmp/\" + file.filename.to_s())\n end\n end\n end", "def show\n @file_info = FileInfo.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_info }\n end\n end", "def file(file_type, id)\n query = \"/?client_id=#{@client_id}&#{id}\"\n path = \"#{file_type}/#{__method__}.to_s\"\n resp = http_get(path, query)\n end", "def index\n @incidentattachments = Incidentattachment.all\n @incident = Incident.all\n end", "def show\n @resource_file = ResourceFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @resource_file }\n end\n end", "def fetch_file(file_path)\n client.get_file(file_path)\n end", "def file_data\n @client.get_file @file_url\n end", "def show\n @action_file = ActionFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @action_file }\n end\n end", "def index\n @file_versions = FileVersion.all\n\n render json: @file_versions\n end", "def files\n db = Database.find(params[:id])\n @files = Dir.entries(db.path)\n @files.delete_if{|f| !f.include?'.dat'}\n @results = []\n @files.each do |entry|\n @results << {:name=>entry,:version=>db.version}\n end\n respond_to do |format|\n format.html\n format.json { render json: @results }\n end\n end", "def index\n @ticket_files = TicketFile.all\n end", "def create\n @incidentfile = Incidentfile.new(incidentfile_params)\n\n respond_to do |format|\n if @incidentfile.save\n format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully created.' 
}\n format.json { render :show, status: :created, location: @incidentfile }\n else\n format.html { render :new }\n format.json { render json: @incidentfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def show\n @userfile = Userfile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @userfile }\n end\n end", "def show\n @capa = Capa.find(params[:id])\n @capa_files = @capa.capa_files.all\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @capa }\n end\n end", "def file(id)\n # Requires authorization\n raise PutioError::AuthorizationRequired if authentication_required!\n\n response = make_get_call('/files/%i' % [id])\n response.download = download(id)\n\n response\n end", "def index\n @event_import_file = EventImportFile.where(id: params[:event_import_file_id]).first\n if @event_import_file\n @event_import_results = @event_import_file.event_import_results.page(params[:page])\n else\n @event_import_results = EventImportResult.page(params[:page])\n end\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @event_import_results }\n format.txt\n end\n end", "def get_file_status(file_path)\n response = HTTParty.get(\"https://#{accountName}.azuredatalakestore.net\" +\n \"/webhdfs/v1/#{file_path}?op=GETFILESTATUS\", {\n body: \"grant_type=client_credentials&client_id=#{clientId}\"+\n \"&client_secret=#{clientSecret}\"+\n \"&resource=https%3A%2F%2Fmanagement.azure.com%2F\",\n headers: {\n \"Authorization\" => \"Bearer #{bearerToken}\",\n \"Accept\" => \"*/*\",\n \"Cache-Control\" => 'no-cache',\n \"Host\" => \"#{accountName}.azuredatalakestore.net\",\n \"Connection\" => 'keep-alive',\n \"cache-control\" => 'no-cache'\n },\n verify: true,\n })\n\n return JSON.parse response.read_body\n end", "def show\n @report_file = ReportFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @report_file }\n end\n end", "def show\n @fileversion = Fileversion.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @fileversion }\n end\n end", "def index\n @inventory_files = InventoryFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @inventory_files }\n end\n end", "def files\n @files=get_endpoint('extra').keys\n end", "def show\n @file_record = FileRecord.find(params[:id])\n @file_versions = @file_record.file_versions.all\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_record }\n format.json { render json: {file_record: @file_record,file_versions: @file_versions} }\n end\n end", "def show\n @test_file = TestFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @test_file }\n end\n end", "def show\n @super_file = SuperFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @super_file }\n end\n end", "def show\n @file_version = FileVersion.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_version }\n end\n end", "def show\n @uploaded_file = UploadedFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @uploaded_file }\n end\n end", "def get_fileset\n\n filesets = batched_get( { id: params[:id] } )\n if filesets.empty?\n 
render_json_fileset_response(:not_found )\n else\n render_json_fileset_response(:ok, fileset_transform( filesets ) )\n end\n end", "def files_get_with_http_info(opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: FilesApi.files_get ...\"\n end\n # resource path\n local_var_path = \"/files\"\n\n # query parameters\n query_params = {}\n query_params[:'start'] = opts[:'start'] if !opts[:'start'].nil?\n query_params[:'limit'] = opts[:'limit'] if !opts[:'limit'].nil?\n query_params[:'include_deleted_files'] = opts[:'include_deleted_files'] if !opts[:'include_deleted_files'].nil?\n query_params[:'sort'] = opts[:'sort'] if !opts[:'sort'].nil?\n\n # header parameters\n header_params = {}\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n auth_names = [ 'access_token' ]\n response = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names)\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: FilesApi#files_get\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return response\n end", "def files\n results\n rescue ApiStruct::EntityError\n result\n end", "def show\n @bulletin_file = BulletinFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @bulletin_file }\n end\n end", "def index\n @agent_import_files = AgentImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @agent_import_files }\n end\n end", "def discover_files\n authorize! :create, resource_class\n respond_to do |f|\n f.json do\n render json: file_locator.to_h\n end\n end\n end", "def parse_files_json(file)\n\n files_hash = convert_json(b2_list_file_names(file))\n files = {}\n\n files_hash[\"files\"].each do |file_hash|\n files[file_hash[\"fileName\"]] = file_hash[\"fileId\"]\n end\n\n return files\n\nend", "def get_file_summary(file_path)\n response = HTTParty.get(\"https://#{accountName}.azuredatalakestore.net\" +\n \"/webhdfs/v1/#{file_path}?op=GETCONTENTSUMMARY\", {\n body: \"grant_type=client_credentials&client_id=#{clientId}\"+\n \"&client_secret=#{clientSecret}\"+\n \"&resource=https%3A%2F%2Fmanagement.azure.com%2F\",\n headers: {\n \"Authorization\" => \"Bearer #{bearerToken}\",\n \"Accept\" => \"*/*\",\n \"Cache-Control\" => 'no-cache',\n \"Host\" => \"#{accountName}.azuredatalakestore.net\",\n \"Connection\" => 'keep-alive',\n \"cache-control\" => 'no-cache'\n },\n verify: true,\n })\n \n return JSON.parse response.read_body\n end", "def show\n @up_file = UpFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @up_file }\n end\n end", "def get(container_name, file_name)\n validate_path_elements(container_name, file_name)\n\n File.from_response(\n file_name,\n client.request_raw_response(\n method: :get,\n path: \"#{container_name}/#{file_name}\",\n expected: 200,\n )\n )\n end", "def get_file_details(id)\n uri = ENDPOINT + \"file/details/#{key}/#{id}\"\n data = JSON.parse(self.class.get(uri).body, :symbolize_names => true)\n Reach::Helper::convert_keys(data)\n end", "def index\n @event_import_files = EventImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @event_import_files }\n end\n end", "def index\n 
@event_import_files = EventImportFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @event_import_files }\n end\n end", "def show\n @file_sequence = FileSequence.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_sequence }\n end\n end", "def get_the_individual_file_to_be_processed\n # p \"individual file selection\"\n files = GetFiles.get_all_of_the_filenames(@project.freecen_files_directory, @project.file_range)\n files\n end", "def show\n @breadcrumb = 'read'\n @processed_file = ProcessedFile.find(params[:id])\n @processed_file_items = @processed_file.processed_file_items.paginate(:page => params[:page], :per_page => per_page)\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @processed_file }\n end\n end", "def download_incident_list(file_name)\n raw_file_name = File.expand_path(\"~/Downloads/incidents.csv\")\n if File.exists?(raw_file_name)\n puts \"Deleted old Downloads file #{raw_file_name}\"\n FileUtils.rm(raw_file_name)\n end\n \n puts \"Request report from #{incident_list_url}\"\n `open \"#{incident_list_url}\"`\n until File.exists?(raw_file_name) do\n puts \"Waiting for file to download\"\n sleep(1)\n end\n\n if File.exists?(file_name)\n puts \"Deleted old incidents/raw file #{file_name}\"\n FileUtils.rm(file_name)\n end\n\n `mv #{raw_file_name} #{file_name}`\n puts \"#{raw_file_name} written to #{file_name}\"\n end", "def show\n\n @observation = Observation.find(params[:id])\n @coral = Coral.find(params[:coral_id])\n\n @files = Dir.glob(\"app/assets/images/tagged_outlines_thumbs/*\")\n \n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @observation }\n end\n end", "def file\n files.first\n end", "def file\n files.first\n end", "def get_file(file_id)\n raise ArgumentError, \"Only one file id allowed for this method\" if file_id.is_a?(Array)\n get_files(file_id).first\n end", "def show\n @purchase_file = PurchaseFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @purchase_file }\n end\n end", "def show\n @ufile = Ufile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @ufile }\n end\n end", "def index\n @file_infos = FileInfo.includes(:component).all\n end", "def update\n respond_to do |format|\n if @incidentfile.update(incidentfile_params)\n format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @incidentfile }\n else\n format.html { render :edit }\n format.json { render json: @incidentfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def file_by_id(id)\n api_result = execute!(\n :api_method => self.drive.files.get,\n :parameters => { \"fileId\" => id })\n return wrap_api_file(api_result.data)\n end", "def api(path)\n OodAppkit.files.api(path: path).to_s\n end", "def index\n @title = \"User uploaded files\"\n get_files(params)\n end", "def show\n @digital_content_file = DigitalContentFile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @digital_content_file }\n end\n end", "def show\n @incident = Incident.find(params[:id])\n\n render json: @incident\n end", "def index\n @casefiles = Casefile.all\n end", "def index\n @money_arrival_files = MoneyArrivalFile.all\n end", "def uploaded_file\n initalize_breadcrumb(\"Uploaded File(s)\", uploadedfile_datauploaders_path)\n currentUser = current_user.id\n @uploadedFiles = UserFileMapping.where(:user_id =>currentUser )\n respond_with(@uploadedFiles)\n end", "def show_uploaded_file\n @organization = Organization.unscoped.find(params[:id])\n @registered_applications = RegisteredApp.all\n if params[:registered_app_id].present?\n @audit_trails = @organization.audit_trails.where(\"fk_registered_app_id =? \", params[:registered_app_id]).order(:createddate)\n else\n if @registered_applications.first.present?\n @audit_trails = @organization.audit_trails.where(\"fk_registered_app_id =?\", @registered_applications.first.id).order(:createddate)\n end\n end\n respond_to do |format|\n format.html\n format.js {\n result = render_to_string(partial: \"/admin/organizations/uploaded_files\", locals: { audit_trails: @audit_trails })\n render json: { html: result }\n }\n end\n end", "def show\n @meeting = Meeting.find(params[:meeting_id])\n @userfiles = @meeting.userfiles\n end", "def get\n text_file = TextFile.find_by(name: params['name'])\n response = {}\n if text_file\n response = create_response(text_file)\n end\n render json: response\n end", "def show\n @mfile = Mfile.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @mfile }\n end\n end", "def getPullRequestFiles(id)\n getFilesFromDiff(getPullRequestDiff(id))\n end", "def get_file_details(file_id)\n begin\n drive = @client.discovered_api('drive', 'v2')\n result = @client.execute(:api_method => drive.files.get, :parameters => { 'fileId' => file_id })\n rescue\n return nil\n end\n \n if result.status == 200\n self.item_into_standard_format(result.data) if result.data.present?\n else\n nil\n end\n end", "def list_files\n source_dir = Path.new(params[:source_dir])\n if params.has_key?(:show_catalogues)\n show_catalogues = params[:show_catalogues]\n else\n show_catalogues = false\n end\n if params[:ext].present?\n file_type = params[:ext]\n else\n file_type = nil\n end\n render json: source_dir.files(file_type: file_type, show_catalogues: show_catalogues)\n end", "def get_intent_to_file\n with_monitoring_and_error_handling do\n raw_response = perform(:get, '')\n EVSS::IntentToFile::IntentToFilesResponse.new(raw_response.status, raw_response)\n end\n end", "def files_get(opts = {})\n files_get_with_http_info(opts)\n end", "def get_files_list(request)\n http_request = request.to_http_info(@api_client.config)\n make_request(http_request, :GET, 'FilesList')\n end", "def show\n @file_project = FileProject.find(params[:id])\n\n 
respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @file_project }\n end\n end", "def show\n @incident = Incident.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @incident }\n end\n end", "def index\n raise ArgumentError, 'Missing file parameter' if filename.blank?\n\n @available_in_workspace = Dor::Services::Client.object(params[:item_id]).files.list.include?(filename)\n\n respond_to do |format|\n format.html { render layout: !request.xhr? }\n end\n end", "def show\n endnote_file = EndnoteFile.find_by_id(params[:id])\n\n if endnote_file\n @response[:endnote_file] = endnote_file\n else\n error_msg(ErrorCodes::OBJECT_ERROR,\"#{I18n.t \"endnote_files.errors.cannot_show_file\"}: #{params[:id]}\")\n end\n render_json\n end", "def index\n @attachfiles = Attachfile.all\n end", "def show\n @file_item = FileItem.cs(self.current_scope).find_by_path(params[:id])\n\n respond_to do |format|\n format.html do\n case @file_item.item_type\n when Saphira::FileItem::TYPE_FOLDER\n @file_items = @file_item.children\n render :action => 'index'\n else\n render\n end\n end\n format.json { render :json => @file_item }\n end\n end", "def index\n @other_files = OtherFile.all\n end", "def report_file1_download(id)\r\n\t\tpost= { \"token\" => @token, \"report\" => id, \"v1\" => \"true\" } \r\n\t\tfile=nessus_http_request('file/report/download', post)\r\n\t\treturn file\r\n\tend", "def do_GET(req,res,filename)\n st = File::stat(filename)\n mtime = st.mtime\n res['etag'] = sprintf(\"%x-%x-%x\", st.ino, st.size, st.mtime.to_i)\n if not_modified?(req, res, mtime, res['etag'])\n res.body = ''\n raise WEBrick::HTTPStatus::NotModified\n else\n res['content-type'] = WEBrick::HTTPUtils::mime_type(filename, @config[:MimeTypes])\n res['content-length'] = st.size\n res['last-modified'] = mtime.httpdate\n res.body = open(filename, \"rb\")\n end\n end", "def index\n @inciting_incidents = IncitingIncident.all\n render json: @inciting_incidents\n end", "def list_files\n User.sync_files!(@context)\n files = user_real_files(params, @context)\n\n if unsafe_params[:limit] && unsafe_params[:offset]\n files = files.limit(unsafe_params[:limit]).offset(unsafe_params[:offset])\n end\n\n search_string = params[:search_string].presence || \"\"\n\n result = files.eager_load(:license, user: :org).\n where(\"nodes.name LIKE ?\", \"%#{search_string}%\").\n order(id: :desc).map do |file|\n describe_for_api(file, unsafe_params[:describe])\n end.compact\n\n render json: unsafe_params[:offset]&.zero? ? 
{ objects: result, count: result.length } : result\n end", "def incidentfile_params\n params.require(:incidentfile).permit(:incident_id, :filetype, :state)\n end", "def show\n @product_bulletin = ProductBulletin.find(params[:id])\n @bulletin_files = @product_bulletin.bulletin_files.all\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @product_bulletin }\n end\n end", "def file(name)\n begin\n @name=name\n @content=get_rest(\"extra/#{@name}\")\n rescue Stingray::NotFoundError \n nil\n end\n end", "def show\n render json: @file, serializer: FileSerializer, :root => \"file\"\n end", "def files_id_get_with_http_info(id, opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: FilesApi.files_id_get ...\"\n end\n # verify the required parameter 'id' is set\n if @api_client.config.client_side_validation && id.nil?\n fail ArgumentError, \"Missing the required parameter 'id' when calling FilesApi.files_id_get\"\n end\n # resource path\n local_var_path = \"/files/{id}\".sub('{' + 'id' + '}', id.to_s)\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # form parameters\n form_params = {}\n\n # http body (model)\n post_body = nil\n auth_names = [ 'access_token' ]\n response = @api_client.call_api(:GET, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names)\n if @api_client.config.debugging\n @api_client.config.logger.debug \"API called: FilesApi#files_id_get\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return response\n end", "def files(trading_partner_id, filename)\n scope 'default'\n url = URI.parse(@api_url + '/files/')\n\n File.open(filename) do |f|\n req = Net::HTTP::Post::Multipart.new url.path,\n 'file' => UploadIO.new(f, 'application/EDI-X12', filename),\n 'trading_partner_id' => trading_partner_id\n req['Authorization'] = \"Bearer #{default_scope.token}\"\n req['User-Agent'] = user_agent\n\n @response = Net::HTTP.start(url.host, url.port) do |http|\n http.request(req)\n end\n end\n\n JSON.parse(@response.body)\n end", "def get(path = '/files/', params = {})\n request :get, path, params\n end", "def show\n #@config_file = ConfigFile.find(params[:id])\n @config_file = current_user.developer.config_files.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @config_file }\n end\n end", "def index\n remove_empty_incident\n @incidents = @project.incidents\n respond_with @incidents\n end", "def show\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @inventory_file }\n format.download {\n send_file @inventory_file.inventory.download,\n filename: @inventory_file.inventory_filename,\n type: 'application/octet-stream'\n }\n end\n end", "def object_files\n @object_files = get_files(params[:id]) if controller_name == 'catalog' || controller_name == 'image_viewer'\n end", "def index\n @inventory_files = InventoryFile.page(params[:page])\n\n respond_to do |format|\n format.html # index.html.erb\n end\n end" ]
[ "0.7518787", "0.67327553", "0.6653261", "0.6556415", "0.6489992", "0.6403362", "0.6321794", "0.63129157", "0.6297115", "0.6265356", "0.6222869", "0.62089425", "0.62032455", "0.61940295", "0.618106", "0.6175662", "0.61054575", "0.61018884", "0.60957474", "0.60893846", "0.6069034", "0.60581803", "0.60556555", "0.6053107", "0.60504127", "0.6020664", "0.6017648", "0.6017061", "0.6008282", "0.6002566", "0.59992284", "0.5992146", "0.59788775", "0.59751767", "0.5962469", "0.595303", "0.5950758", "0.59504074", "0.59478176", "0.59461725", "0.5941619", "0.5941121", "0.59198177", "0.59185", "0.5916726", "0.5916047", "0.58985645", "0.5896974", "0.58854234", "0.5881236", "0.58620834", "0.5855546", "0.58330476", "0.5804137", "0.5804137", "0.5772741", "0.57705015", "0.57603353", "0.5759595", "0.5756504", "0.57515574", "0.57440495", "0.57435155", "0.5720877", "0.56999063", "0.56984264", "0.5667714", "0.566126", "0.56601775", "0.56550723", "0.56547916", "0.56511027", "0.56460124", "0.5637749", "0.5635068", "0.5628007", "0.56246054", "0.56187123", "0.56131583", "0.560669", "0.56034225", "0.5596137", "0.55955315", "0.5593745", "0.5591065", "0.55889255", "0.55857617", "0.55796444", "0.557747", "0.55765253", "0.5574564", "0.55667764", "0.5565383", "0.55646175", "0.5560435", "0.555183", "0.55310863", "0.55289155", "0.5525776", "0.55178463", "0.5516507" ]
0.0
-1
POST /incidentfiles POST /incidentfiles.json
def create @incidentfile = Incidentfile.new(incidentfile_params) respond_to do |format| if @incidentfile.save format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully created.' } format.json { render :show, status: :created, location: @incidentfile } else format.html { render :new } format.json { render json: @incidentfile.errors, status: :unprocessable_entity } end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create\n if(params[:incId]).present?\n @incident = Incident.find(params[:incId])\n @incident.report_type = params[:incident][:report_type] \n @incident.your_name = params[:incident][:your_name]\n @incident.job_title = params[:incident][:job_title]\n @incident.injury_date = params[:incident][:injury_date]\n @incident.injury_time = params[:incident][:injury_time]\n @incident.witnesses = params[:incident][:witnesses]\n @incident.location =params[:incident][:location]\n @incident.circumstances = params[:incident][:circumstances]\n @incident.event_discription = params[:incident][:event_discription]\n @incident.injuries_type =params[:incident][:injuries_type]\n @incident.ppe_used =params[:incident][:ppe_used]\n @incident.medical_assistance_provided =params[:incident][:medical_assistance_provided]\n else\n @incident = @project.incidents.build(incident_params)\n end\n\n\n \n respond_to do |format|\n if @incident.save\n if params[:files]\n params[:files].each { |image|\n @incident.incidents_files.create(file: image, incident_id: @incident.id)\n }\n end\n @incident.cn = true\n @incident.save!\n incidents = @project.incidents.where(cn: false)\n incidents.destroy_all if incidents.present?\n format.html { redirect_to projects_path({inc: @incident.id}), notice: 'Incident was successfully created.' }\n format.json { render json: {incident_id: @incident.id} } \n # format.json { render action: 'show', status: :created, location: @incident }\n else\n format.html { render action: 'new' }\n format.json { render json: :true}\n # format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def incidentfile_params\n params.require(:incidentfile).permit(:incident_id, :filetype, :state)\n end", "def create\n @treq = Treq.new(params[:treq])\n \n respond_to do |format|\n if @treq.save\n unless params[:treq_files].blank?\n params[:treq_files]['file'].each do |a|\n @treq_file = @treq.treq_files.create!(:file => a, :treq_id => @treq.id)\n end\n end\n TreqNotifier.submit_treq(@treq).deliver\n format.html { redirect_to @treq, notice: 'Treq was successfully created.' }\n format.json { render json: @treq, status: :created, location: @treq }\n else\n format.html { render action: \"new\", alert: \"Test Requset has been submitted.\"}\n format.json { render json: @treq.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @incidentfiles = Incidentfile.all\n end", "def set_incidentfile\n @incidentfile = Incidentfile.find(params[:id])\n end", "def create\n @incidentattachment = Incidentattachment.new(incidentattachment_params)\n @incidentattachment.filepath = \"N/A\"\n\n respond_to do |format|\n if @incidentattachment.save\n format.html { redirect_to @incidentattachment, notice: 'Incidentattachment was successfully created.' }\n format.json { render :show, status: :created, location: @incidentattachment }\n else\n format.html { render :new }\n format.json { render json: @incidentattachment.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @incident = Incident.new( incident_params )\n\n respond_to do |format|\n if @incident.save\n if params[ :incident_pictures ].present?\n params[ :incident_pictures ][ 'picture' ].each do | pic |\n @incident_picture = @incident.incident_pictures.create!( picture: pic )\n end\n end\n if params[:commit] == \"Submit & Next\"\n format.html { redirect_to new_incident_incident_detail_path(@incident), notice: 'Thank you! Incident was successfully created. 
Please submit incident details here' }\n format.json { render :show, status: :created, location: @incident }\n else\n format.html { redirect_to @incident, notice: 'Thank you! Incident was successfully created. Please submit incident details to make it Live' }\n format.json { render :show, status: :created, location: @incident }\n end\n else\n format.html { render :new }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def save_file_batch(files, new_obj_id)\n url = \"#{@base_url}/api/v2/files/#{new_obj_id}/create_batch\"\n resp = api_post_json(url, files.to_json)\n if resp.code != '201'\n @log.write(\"Error saving #{files.count} files #{files[0]['identifier']}...\\n\")\n @log.write(files.inspect)\n @log.write(\"\\n\" + resp.body + \"\\n\")\n #exit(1)\n end\n end", "def Upload file\n \n APICall(path: \"uploads.json?filename=#{file.split('/').last}\",method: 'POST',payload: File.read(file))\n \n end", "def create\n\n params['file'].each{|f|\n doc = Community::File.new(community_file_params)\n doc.file = f\n community_file_params[:title].blank? ? doc.title = f.original_filename : doc.title = community_file_params[:title]\n doc.author = current_user.email unless current_user.nil?\n doc.save\n\n file = File.read('public/uploads/community/file/file/' + doc._id.to_s + '/' + doc.file.filename)\n\n doc.import JSON.parse(file)\n } unless params['file'].nil?\n\n respond_to do |format|\n format.js {}\n format.json { head :no_content, status: :created }\n end\n end", "def create\n #@incident = @quote.incidents.new(incident_params)\n logger.info params[:incident]\n params[:incident].each do |incident|\n @incident = @quote.incidents.new(incident)\n @incident.save\n end\n respond_to do |format|\n format.json { render :json => { :code => \"201\", :description => \"Created incidents\"} }\n end\n end", "def post_attachment(file_s)\n setup\n @req = Net::HTTP::Post.new(\"/uploads.json\")\n auth\n @req[\"Content-Type\"] = \"application/octet-stream\"\n @req[\"Content-Length\"] = file_s.length\n @req.body = file_s\n res = @http.request(@req)\n\n if res.code.to_i == 201\n return [true, JSON.load(res.body)[\"upload\"][\"token\"]]\n else\n return [false, JSON.load(res.body)[\"errors\"].first]\n end\n end", "def create\n hash = []\n params[:files].each do |i,file_io|\n path = File.join(Rails.root,'public','attach',file_io.original_filename)\n File.open(path, \"wb\") { |f| f.write(file_io.read)}\n attachment = Attachment.create do |attach|\n attach.name = file_io.original_filename\n attach.describe = params[:describe]\n end \n hash.push attachment\n end\n render json: hash\n end", "def incidentattachment_params\n params.require(:incidentattachment).permit(:incident, :filepath, :name, :attachment, :incident_id)\n end", "def create\n @indicator_files = []\n\n params['indicate_file'].each do |f|\n doc = KeyIndicateMap::IndicatorFile.new\n doc.indicate_file = f\n params[:key_indicate_map_indicator_file][:title].blank? ? 
doc.title = f.original_filename : doc.title = params[:key_indicate_map_indicator_file][:title]\n doc.description = params[:key_indicate_map_indicator_file][:description]\n doc.year = params[:year]\n doc.author = current_user.email\n if !doc.save\n respond_to do |format|\n format.js {render :js=> 'alert(\"' + doc.errors.messages[:year][0] + '\");' }\n format.json { head :no_content, status: :unprocessable_entity }\n end\n return\n end\n @indicator_files << doc\n\n table = read_table_from_file 'public/uploads/key_indicate_map/indicator_file/indicate_file/' + doc._id.to_s + '/' + doc.indicate_file.filename\n @errors = doc.import table, doc.year.to_s\n end unless params['indicate_file'].nil?\n\n respond_to do |format|\n format.js {}\n format.json { head :no_content, status: :created }\n end\n end", "def create\n files = params[:files]\n\n files.each do |file|\n\n filename = file.original_filename\n\n # Rack uploads have `#tempfiles` and test uploads are Tempfile objects. More\n # investigation required.\n file = file.tempfile if file.respond_to?(:tempfile)\n\n UploadStore.create(\n :key => filename,\n :body => file,\n :public => true\n )\n end\n\n render json: {status: 'success'}\n end", "def uploadFile(fileName)\n\n puts \"Uploading '#{fileName}' to TestFlight ... \"\n\n currentTime = Time.new\n \n payload = {\n :api_token => API_TOKEN,\n :team_token => TEAM_TOKEN,\n :file => File.new(fileName.to_s, 'rb'),\n :notes => NOTES + \" (\"+currentTime.inspect+\")\",\n :distribution_lists => DIST_LIST,\n :notify => NOTIFY\n }\n \n begin\n response = RestClient.post(END_POINT, payload, :accept => :json)\n rescue => e\n response = e.response\n end\n \n if (response.code == 201) || (response.code == 200)\n puts \"Upload complete.\"\n else\n puts \"Upload failed. (#{response})\"\n end\n \nend", "def create\n @incident = Incident.new(incident_params)\n\n if @incident.save\n render json: @incident, status: :created, location: @incident\n else\n render json: @incident.errors, status: :unprocessable_entity\n end\n end", "def create\n @ticket_file = TicketFile.new(ticket_file_params)\n\n respond_to do |format|\n if @ticket_file.save\n format.html { redirect_to @ticket_file, notice: 'Ticket file was successfully created.' 
}\n format.json { render :show, status: :created, location: @ticket_file }\n else\n format.html { render :new }\n format.json { render json: @ticket_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def post_file_and_give_me_a_json(additional_path, file_path)\n if self.service_base_path != nil\n message = \"{error: \\\"File not found.\\\"}\"\n File.open(file_path) do |file|\n body = { 'arquivo' => file }\n message = self.http_client.post \"#{self.base_url}#{self.service_base_path}/#{additional_path}.json?api_key=#{self.access_token}\", body\n end\n trata_erro(message.content)\n end\n end", "def create\n AttackFile.write(attack_files_params[:filename], attack_files_params[:body])\n\n render status: 201\n end", "def files_post(file, opts = {})\n files_post_with_http_info(file, opts)\n end", "def upload_file(item_file)\n upload_response = @server.upload(item_file.file(item_file.metadata_full_path))\n json = JSON.parse(upload_response.body)\n json['files'].map { |f| f['id'] }\n end", "def postIngest_file( job_id, filedata)\n params = Hash.new\n params['job_id'] = job_id\n params['filedata'] = filedata\n return doCurl(\"post\",\"/ingest_file\",params)\n end", "def upload_upload(directory:, files:)\n upload_files = {multipart: true}\n files.each {|f| upload_files[f] = File.open(f, 'rb')}\n r = aptly_request 'POST', \"api/files/#{directory}\", payload: upload_files\n JSON.parse(r.body)\n end", "def perform_study_file_upload(filename, study_file_params, study_id)\n file_upload = Rack::Test::UploadedFile.new(Rails.root.join('test', 'test_data', filename))\n study_file_params[:study_file].merge!(upload: file_upload)\n patch \"/single_cell/studies/#{study_id}/upload\", params: study_file_params, headers: {'Content-Type' => 'multipart/form-data'}\nend", "def add_fileset\n\n # grab the parameters\n work_id = params[:work]\n file_id = params[:file]\n label = params[:label]\n\n # validate them\n if work_id.blank? == false && file_id.blank? == false && label.blank? == false\n work = get_the_work( work_id )\n if work.nil? == false\n filename = APIV1FilesetsController.cache_contents( file_id )\n if filename.blank? 
== false\n fileset = ::FileSet.new\n fileset.title << label\n file_actor = ::CurationConcerns::Actors::FileSetActor.new( fileset, @api_user )\n file_actor.create_metadata( work )\n file_actor.create_content( File.open( filename ) )\n fileset.visibility = Hydra::AccessControls::AccessRight::VISIBILITY_TEXT_VALUE_PUBLIC\n fileset.save!\n\n # audit the information\n #audit_log( \"File #{label} for work id #{work_id} (#{work.identifier}) added by #{User.cid_from_email( @api_user.email)}\" )\n WorkAudit.audit( work_id, User.cid_from_email( @api_user.email), \"File #{File.basename( filename )}/#{label} added\" )\n\n render_standard_response( :ok )\n else\n render_standard_response( :not_found, 'File not found' )\n end\n else\n render_standard_response( :not_found, 'Work not found' )\n end\n else\n render_standard_response( :unauthorized, 'Missing work identifier or file identifier or file label' )\n end\n\n end", "def files(trading_partner_id, filename)\n scope 'default'\n url = URI.parse(@api_url + '/files/')\n\n File.open(filename) do |f|\n req = Net::HTTP::Post::Multipart.new url.path,\n 'file' => UploadIO.new(f, 'application/EDI-X12', filename),\n 'trading_partner_id' => trading_partner_id\n req['Authorization'] = \"Bearer #{default_scope.token}\"\n req['User-Agent'] = user_agent\n\n @response = Net::HTTP.start(url.host, url.port) do |http|\n http.request(req)\n end\n end\n\n JSON.parse(@response.body)\n end", "def post_file(filename,repo)\n curl_post(\"#{self.host}/api2/repos/#{repo}/file/?p=#{filename}\",{\"operation\"=> \"create\"}).body_str\n end", "def update\n @treq = Treq.find(params[:id])\n\n respond_to do |format|\n unless params[:treq_files].blank?\n params[:treq_files]['file'].each do |a|\n @treq_file = @treq.treq_files.create!(:file => a, :treq_id => @treq.id)\n end\n end\n if @treq.update_attributes(params[:treq])\n format.html { redirect_to @treq, notice: 'Treq was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @treq.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @incidentfile.update(incidentfile_params)\n format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @incidentfile }\n else\n format.html { render :edit }\n format.json { render json: @incidentfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @inciting_incident = IncitingIncident.new(inciting_incident_params)\n\n if @inciting_incident.save\n render json: @inciting_incident, status: :created, location: @inciting_incident\n else\n render json: @inciting_incident.errors, status: :unprocessable_entity\n end\n end", "def create\n @unsuccessful_file = UnsuccessfulFile.new(unsuccessful_file_params)\n\n respond_to do |format|\n if @unsuccessful_file.save\n format.json { head :no_content }\n else\n format.json { render json: @unsuccessful_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def _upload(api_key, file) \n url = Client.site + \"/upload.json\"\n params = { \"api_key\" => api_key, \"api_method\" => \"ruby\", \"id\" => id.to_s, \"file\" => file }\n resp = HTTPClient.post url, params \n JSON.parse(resp.content)\n end", "def upload_file\n file = google_session.upload_from_file(file_params[:file].tempfile,\n file_params[:file].original_filename)\n collection.add(file)\n google_session.root_collection.remove(file)\n render json: 'Upload Successful'\n end", "def create\n data =params[:data]\n\n resource = ActiveSupport::JSON.decode(data)\n @repo = Repository.find(resource[\"repo_id\"])\n dir_path = resource[\"dirPath\"];\n post = DataFile.save(params['file'], @repo.path+dir_path)\n\n\t\t@git = GitHelper.init(@repo.path, current_user.email, current_user.name)\n\t\tGitHelper.commitAll(@git, resource[\"comment\"])\n\n\t\trender json: {success: \"file uploaded\"}\n end", "def upload(auth_token, file_path, ticket_id, end_point)\n params = {\n :auth_token => auth_token,\n :ticket_id => ticket_id\n }\n params[:api_sig] = generate_api_sig params\n \n params.merge!({ :file_data => File.open(file_path) })\n \n client = HTTPClient.new\n response = client.post(end_point, params)\n md5 = response.content\n\n self.class.create_json_manifest(md5)\n end", "def create\n uploaded_data = params[:timeline][:json_file]\n file_path = Rails.root.join('public', 'uploads', uploaded_data.original_filename)\n File.open(file_path, 'wb') do |file|\n file.write(uploaded_data.read)\n end\n\n @timeline = Timeline.new(\n :device_name => params[:timeline][:device_name],\n :test_name => params[:timeline][:test_name],\n :json_digest => view_context.parse(file_path))\n\n respond_to do |format|\n if @timeline.save\n format.html { redirect_to @timeline, notice: 'Timeline was successfully created.' }\n format.json { render json: @timeline, status: :created, location: @timeline }\n else\n format.html { render action: \"new\" }\n format.json { render json: @timeline.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @resource_file = ResourceFile.new(params[:resource_file])\n\n respond_to do |format|\n if @resource_file.save\n format.html { redirect_to @resource_file, notice: 'Resource file was successfully created.' }\n format.json { render json: @resource_file, status: :created, location: @resource_file }\n else\n format.html { render action: \"new\" }\n format.json { render json: @resource_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @test_file = TestFile.new(params[:test_file])\n\n respond_to do |format|\n if @test_file.save\n format.html { redirect_to @test_file, notice: 'Test file was successfully created.' 
}\n format.json { render json: @test_file, status: :created, location: @test_file }\n else\n format.html { render action: \"new\" }\n format.json { render json: @test_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def new_feature_request(folder_id, feature_content, file_name)\n url = URI(\"#{$base_url}/api/projects/#{$project_id}/folders/#{folder_id}/create_from_feature\")\n\n http = Net::HTTP.new(url.host, url.port)\n http.use_ssl = true\n request = Net::HTTP::Post.new(url)\n request[\"accept\"] = 'application/vnd.api+json; version=1'\n request[\"access-token\"] = $access_token\n request[\"uid\"] = $uid\n request[\"client\"] = $client\n request[\"Content-Type\"] = 'application/json'\n\n data = {\n data: {\n attributes: {\n \"feature\": feature_content\n }\n }\n }\n\n request.body = JSON.generate(data)\n response = http.request(request)\n\n if response.code == 200.to_s\n $created_count = $created_count + 1\n $success_uploaded_count = $success_uploaded_count + 1\n $uploaded_features_list.push(file_name)\n puts \"Feature '#{get_name_feature_from_file(feature_content)}' created.\"\n else\n $fail_uploaded_count = $fail_uploaded_count + 1\n $not_uploaded_features_list.push(file_name)\n end\n\n response.code\nend", "def create\n @product_bulletin = ProductBulletin.new(params[:product_bulletin])\n\n respond_to do |format|\n if @product_bulletin.save\n unless params[:bulletin_files].blank?\n params[:bulletin_files]['file'].each do |a|\n @bulletin_file = @product_bulletin.bulletin_files.create!(:file => a, :product_bulletin_id => @product_bulletin.id)\n end\n end\n format.html { redirect_to @product_bulletin, notice: 'Product bulletin was successfully created.' }\n format.json { render json: @product_bulletin, status: :created, location: @product_bulletin }\n else\n format.html { render action: \"new\" }\n format.json { render json: @product_bulletin.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @incident = Incident.new(incident_params)\n\n respond_to do |format|\n if @incident.save\n format.html { redirect_to @incident, notice: 'Incident was successfully created.' }\n format.json { render :show, status: :created, location: @incident }\n else\n format.html { render :new }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def import\n @obj = Node.create_sub_tree JSON.parse(File.read(params[:file_to_upload].tempfile.path), symbolize_names: true)\n\n respond_to do |format|\n format.html { redirect_to @obj, notice: 'Node was successfully created.' }\n format.json { render json: @obj, status: :created, location: @obj }\n end\n end", "def discover_files\n authorize! :create, resource_class\n respond_to do |f|\n f.json do\n render json: file_locator.to_h\n end\n end\n end", "def create\n @incident = Incident.new(params[:incident])\n\n respond_to do |format|\n if @incident.save\n format.html { redirect_to @incident, notice: 'Incident was successfully created.' }\n format.json { render json: @incident, status: :created, location: @incident }\n else\n format.html { render action: \"new\" }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @incident = Incident.new(params[:incident])\n\n respond_to do |format|\n if @incident.save\n format.html { redirect_to @incident, notice: 'Incident was successfully created.' 
}\n format.json { render json: @incident, status: :created, location: @incident }\n else\n format.html { render action: \"new\" }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @project = Project.find params[:project_id]\n @file = @project.source_files.create :name=>params[:name], :code=>params[:code]\n\n respond_to do |format|\n if @file.save \n format.html {redirect_to @project}\n format.json {render json: @file}\n end\n end\n end", "def create_file\n project = UserFile.publication_project!(current_user, @scope)\n\n api = DIContainer.resolve(\"api.user\")\n file_dxid = api.file_new(params[:name], project)[\"id\"]\n\n file = UserFile.create!(\n dxid: file_dxid,\n project: project,\n name: params[:name],\n state: \"open\",\n description: params[:description],\n user: current_user,\n parent: current_user,\n scope: @scope,\n UserFile.scope_column_name(@scope) => @folder&.id,\n )\n\n render json: { id: file.uid }\n end", "def upload\n validate_documents_content_type\n validate_documents_page_size\n\n power_of_attorney = ClaimsApi::PowerOfAttorney.find_using_identifier_and_source(id: params[:id],\n source_name: source_name)\n power_of_attorney.set_file_data!(documents.first, params[:doc_type])\n power_of_attorney.status = 'submitted'\n power_of_attorney.save!\n power_of_attorney.reload\n ClaimsApi::VBMSUploadJob.perform_async(power_of_attorney.id)\n render json: power_of_attorney, serializer: ClaimsApi::PowerOfAttorneySerializer\n end", "def create\n @capa = Capa.new(params[:capa])\n @capa = @capa.new_status(@capa)\n respond_to do |format|\n if @capa.save\n unless params[:capa_files].blank?\n params[:capa_files]['file'].each do |a|\n @capa_file = @capa.capa_files.create!(:file => a, :capa_id => @capa.id)\n end\n end\n CapaMailer.submit_capa(@capa).deliver\n format.html { if user_in_shop? \n redirect_to new_capa_path, notice: 'Capa was successfully notice.' \n else\n redirect_to @capa, notice: 'Capa was successfully notice.'\n end}\n format.json { render json: @capa, status: :created, location: @capa }\n else\n format.html { render action: \"new\" }\n format.json { render json: @capa.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @bulletin_file = BulletinFile.new(params[:bulletin_file])\n\n respond_to do |format|\n if @bulletin_file.save\n format.html { redirect_to @bulletin_file, notice: 'Bulletin file was successfully created.' }\n format.json { render json: @bulletin_file, status: :created, location: @bulletin_file }\n else\n format.html { render action: \"new\" }\n format.json { render json: @bulletin_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @casefile = Casefile.new(casefile_params)\n\n respond_to do |format|\n if @casefile.save\n format.html { redirect_to @casefile, notice: 'Casefile was successfully created.' }\n format.json { render :show, status: :created, location: @casefile }\n else\n format.html { render :new }\n format.json { render json: @casefile.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @incident = Incident.new(incident_params)\n\n respond_to do |format|\n if @incident.save\n format.html { redirect_to root_url, notice: 'Incident was successfully created.' 
}\n format.json { render action: 'show', status: :created, location: @incident }\n else\n format.html { render action: 'new' }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @agent_import_file = AgentImportFile.new(params[:agent_import_file])\n @agent_import_file.user = current_user\n\n respond_to do |format|\n if @agent_import_file.save\n format.html { redirect_to @agent_import_file, :notice => t('controller.successfully_created', :model => t('activerecord.models.agent_import_file')) }\n format.json { render :json => @agent_import_file, :status => :created, :location => @agent_import_file }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @agent_import_file.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @action_file = ActionFile.new(params[:action_file])\n\n respond_to do |format|\n if @action_file.save\n format.html { redirect_to @action_file, notice: 'Action file was successfully created.' }\n format.json { render json: @action_file, status: :created, location: @action_file }\n else\n format.html { render action: \"new\" }\n format.json { render json: @action_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_job_file(job_id, filename, data)\n request = Net::HTTP::Put.new(\"/jobs/#{job_id}/files/#{filename}\")\n request.body = JSON.generate(data)\n response = http.request(request)\n handle_response({ request_method: request.method, request_path: request.path, request_body: request.body }, response)\n end", "def test_attach_file\n# post :upload, \"note\"=>{\"title\"=>\"my note\"}, \"courseid\"=>\"806350272748085520\",\n# \"processor\"=>{\"id\"=>\"1000001\"}, \"success\"=>\"/course/806350272748085520/ACMA-320/share_notes\", \n# \"upload_id\"=>\"1169944954\", \n# \"failure\"=>\"/course/806350272748085520/ACMA-320/share_notes\"\n \n post :upload, \"noteid\"=>\"816717565610925385\", \"processor\"=>{\"id\"=>\"1000001\"}\n \n end", "def create\n @indexed_file = IndexedFile.new(params[:indexed_file])\n\n respond_to do |format|\n if @indexed_file.save\n format.html { redirect_to @indexed_file, notice: 'Indexed file was successfully created.' }\n format.json { render json: @indexed_file, status: :created, location: @indexed_file }\n else\n format.html { render action: \"new\" }\n format.json { render json: @indexed_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def destroy\n @incidentfile.destroy\n respond_to do |format|\n format.html { redirect_to incidentfiles_url, notice: 'Incidentfile was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def create\n @userfile = Userfile.new(params[:userfile])\n \n\n respond_to do |format|\n if @userfile.save\n format.html {\n render :json => [@userfile.to_jq_upload].to_json,\n :content_type => 'text/html',\n :layout => false\n }\n format.json { render json: {files: [@userfile.to_jq_upload] }, status: :created, location: @userfile }\n else\n format.html { render action: \"new\" }\n format.json { render json: @userfile.errors, status: :unprocessable_entity }\n end\n end\n \n end", "def create\n @file_info = FileInfo.new(params[:file_info])\n\n respond_to do |format|\n if @file_info.save\n format.html { redirect_to @file_info, notice: 'File info was successfully created.' 
}\n format.json { render json: @file_info, status: :created, location: @file_info }\n else\n format.html { render action: \"new\" }\n format.json { render json: @file_info.errors, status: :unprocessable_entity }\n end\n end\n end", "def add_file(filename)\n step('I visit the new file page')\n fill_in(\"file_label\", with: filename)\n attach_file('file_file', filename)\n fill_in('file_description', with: sample_file_description(filename))\n click_link_or_button('Upload File')\n wait_for_ajax_complete\nend", "def stage_json(file_path)\n file_dir, filename = File.split(file_path)\n relative_dir_path = file_dir.gsub(%r{^#{@metrics_dir}/}, \"\")\n destination_dir = \"#{@staging_dir}/#{relative_dir_path}\"\n FileUtils.mkdir_p(destination_dir) unless File.directory?(destination_dir)\n FileUtils.cp(file_path, \"#{destination_dir}/#{filename}\")\n end", "def incident_params\n params.require(:incident).permit(:report_type, :your_name, :job_title, :injury_date, :injury_time, :witnesses, :location, :circumstances, :event_discription, :injuries_type, :ppe_used, :medical_assistance_provided, :project_id,:file)\n end", "def create\n @incident = Incident.new(incident_params)\n\n respond_to do |format|\n if @incident.save\n @incident.users << current_user\n format.html { redirect_to incident_path(@incident) }\n format.js { }\n format.json { render incident_path(@incident), status: :created, location: @incident }\n else\n format.html { render :new }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @incidentattachments = Incidentattachment.all\n @incident = Incident.all\n end", "def upload_file(folder_id, file_url) \n\tputs \"Uploading file: \" + file_url + \" to folder: \" + folder_id\n\tfile = {\n\t\t:url => file_url, \n\t\t:name => \"Ruby_File.pdf\", \n\t\t:notes => \"This was uploaded using Ruby\", \n\t\t:copies => 1, \n\t\t:folderId => folder_id,\n\t\t# :flow => \"Flow ezName\" # The flow value is either the easy submit name of the flow or the _id property from get_flows()\n\t}\n\tdata = JSON.generate(file)\n\tresponse = request_post('/api/partner/file', data)\n\tputs response.body\nend", "def files_post_with_http_info(file, opts = {})\n if @api_client.config.debugging\n @api_client.config.logger.debug \"Calling API: FilesApi.files_post ...\"\n end\n # verify the required parameter 'file' is set\n if @api_client.config.client_side_validation && file.nil?\n fail ArgumentError, \"Missing the required parameter 'file' when calling FilesApi.files_post\"\n end\n # resource path\n local_var_path = \"/files\"\n\n # query parameters\n query_params = {}\n\n # header parameters\n header_params = {}\n\n # form parameters\n form_params = {}\n form_params[\"file\"] = file\n form_params[\"deal_id\"] = opts[:'deal_id'] if !opts[:'deal_id'].nil?\n form_params[\"person_id\"] = opts[:'person_id'] if !opts[:'person_id'].nil?\n form_params[\"org_id\"] = opts[:'org_id'] if !opts[:'org_id'].nil?\n form_params[\"product_id\"] = opts[:'product_id'] if !opts[:'product_id'].nil?\n form_params[\"activity_id\"] = opts[:'activity_id'] if !opts[:'activity_id'].nil?\n form_params[\"note_id\"] = opts[:'note_id'] if !opts[:'note_id'].nil?\n\n # http body (model)\n post_body = nil\n auth_names = [ 'access_token' ]\n response = @api_client.call_api(:POST, local_var_path,\n :header_params => header_params,\n :query_params => query_params,\n :form_params => form_params,\n :body => post_body,\n :auth_names => auth_names)\n if @api_client.config.debugging\n 
@api_client.config.logger.debug \"API called: FilesApi#files_post\\nData: #{data.inspect}\\nStatus code: #{status_code}\\nHeaders: #{headers}\"\n end\n return response\n end", "def create\n @ufile = Ufile.new(params[:ufile])\n @ufile.user_id = current_user.id\n respond_to do |format|\n if @ufile.save\n format.html { redirect_to \"/ufiles\", notice: 'Ufile was successfully created.' }\n format.json { render json: @ufile, status: :created, location: @ufile }\n else\n format.html { render action: \"new\" }\n format.json { render json: @ufile.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n path = save_file \n if path \n if update_versioned_files?\n\n @file_version = FileVersion.new(:path =>path, :versioned_file_id => params[:versioned_file_id], :isActive => true )\n if @file_version.save\n render json: @file_version, status: :created, location: @file_version\n else\n render json: @file_version.errors, status: :unprocessable_entity\n end\n else\n render json: @file_versions.errors, status: :unprocessable_entity\n end\n else \n render json: { :message => \"The file is not valid\" }, :status => :precondition_failed\n end\n \n end", "def upload_file\n #TODO\n end", "def post_file(path, file_path, options = {})\n request = Net::HTTP::Post.new(request_uri(path))\n options[:partial_scan] ||= CopyleaksApi::Config.allow_partial_scan\n boundary = \"copyleaks_sdk_#{SecureRandom.hex(4)}\"\n request.body = file_body(file_path, boundary)\n make_request(request, options.merge(boundary: boundary))\n end", "def create\n @event_subscription = EventSubscription.new(event_subscription_params)\n @event_subscription[:approved] = @event_subscription.user_requirements_status\n if @event_subscription.save && !@event_subscription[:approved]\n file_params.each do |requirement|\n if(requirement[\"doc\"])\n requirement.symbolize_keys\n requirement[:doc].symbolize_keys\n path = \"data:#{requirement[:doc][:filetype]};base64, #{requirement[:doc][:base64]}\"\n Document.create(user_id: @event_subscription.user_id,\n requirement_id: requirement[:id],\n state: \"pending_review\",\n path: path\n )\n end\n end\n\n render json: @event_subscription, status: :created, location: @event_subscription\n else\n render json: @event_subscription.errors, status: :unprocessable_entity\n end\n end", "def create_file\n dest_file = Item.new(Path.new(params[:dest_file]))\n content = params[:content]\n response = {}\n response[:dest_file] = dest_file\n permission = params[:force_permission]\n if dest_file.create(content, permission)\n response[:msg] = \"Success\"\n render json: response, status: 200\n else\n response[:msg] = \"Fail\"\n render json: response, status: 422\n end\n end", "def download_incident_list(file_name)\n raw_file_name = File.expand_path(\"~/Downloads/incidents.csv\")\n if File.exists?(raw_file_name)\n puts \"Deleted old Downloads file #{raw_file_name}\"\n FileUtils.rm(raw_file_name)\n end\n \n puts \"Request report from #{incident_list_url}\"\n `open \"#{incident_list_url}\"`\n until File.exists?(raw_file_name) do\n puts \"Waiting for file to download\"\n sleep(1)\n end\n\n if File.exists?(file_name)\n puts \"Deleted old incidents/raw file #{file_name}\"\n FileUtils.rm(file_name)\n end\n\n `mv #{raw_file_name} #{file_name}`\n puts \"#{raw_file_name} written to #{file_name}\"\n end", "def upload_document(file, \r\n ticket_id)\r\n\r\n # Validate required parameters\r\n if file == nil\r\n raise ArgumentError.new \"Required parameter 'file' cannot be nil.\"\r\n elsif ticket_id == nil\r\n raise 
ArgumentError.new \"Required parameter 'ticket_id' cannot be nil.\"\r\n end\r\n\r\n # the base uri for api requests\r\n _query_builder = Configuration.base_uri.dup\r\n\r\n # prepare query string for API call\r\n _query_builder << '/tickets/{ticket_id}/document_upload'\r\n\r\n # process optional query parameters\r\n _query_builder = APIHelper.append_url_with_template_parameters _query_builder, {\r\n 'ticket_id' => ticket_id\r\n }\r\n\r\n # validate and preprocess url\r\n _query_url = APIHelper.clean_url _query_builder\r\n\r\n # prepare headers\r\n _headers = {\r\n 'user-agent' => 'APIMATIC 2.0',\r\n 'X-API-TOKEN' => Configuration.x_api_token,\r\n 'X-API-EMAIL' => Configuration.x_api_email\r\n }\r\n\r\n # append custom auth authorization\r\n CustomAuthUtility.append_custom_auth_params _headers\r\n\r\n # prepare parameters\r\n _parameters = {\r\n 'file' => file\r\n }\r\n\r\n # invoke the API call request to fetch the response\r\n _response = Unirest.post _query_url, headers: _headers, parameters: _parameters\r\n\r\n # Error handling using HTTP status codes\r\n if _response.code == 401\r\n raise APIException.new 'Your API key is incorrect', 401, _response.body\r\n elsif _response.code == 400\r\n raise APIException.new 'There is an error in the parameters you send', 400, _response.body\r\n elsif _response.code == 404\r\n raise APIException.new 'Cannot find the resource specified', 404, _response.body\r\n elsif !_response.code.between?(200, 206) # [200,206] = HTTP OK\r\n raise APIException.new 'HTTP Response Not OK', _response.code, _response.body\r\n end\r\n\r\n # Return appropriate type\r\n _response.body\r\n end", "def create\n @user_file = UserFile.new(params[:user_file])\n\n respond_to do |format|\n if @user_file.save\n format.html { redirect_to @user_file, :notice => 'User file was successfully created.' }\n format.json { render :json => @user_file, :status => :created, :location => @user_file }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @user_file.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @file_info = FileInfo.new(file_info_params)\n\n respond_to do |format|\n if @file_info.save\n format.html { redirect_to @file_info, notice: 'File info was successfully created.' 
}\n format.json { render :show, status: :created, location: @file_info }\n else\n format.html { render :new }\n format.json { render json: @file_info.errors, status: :unprocessable_entity }\n end\n end\n end", "def file_report(resources)\n resources = format(resources)\n merged_options = options.merge({ resource: resources })\n response = RestClient.post endpoint(\"/file/report\"), merged_options\n parse_response(response)\n end", "def create\n infile = params[:file]\n filename = infile.original_filename\n filename_extension = Pathname.new(filename).extname.downcase\n\n if '.xml' != filename_extension\n error_msg(ErrorCodes::VALIDATION_ERROR,\"#{I18n.t \"endnote_files.errors.file_format_not_allowed\"}\")\n render_json\n return\n end\n\n xml = infile.read\n endnote_file = EndnoteFile.new(xml: xml, username: @current_user.username, name: filename)\n\n if endnote_file.save\n #pp '-*- EndnoteFileController.create - successfully stored simple EndnoteFile object -*-'\n endnote_file = handle_file_content(endnote_file)\n @response[:endnote_file] = endnote_file.as_json\n #pp '-*- EndnoteFileController.create produced json response-*-'\n render_json(201)\n else\n #pp '-*- EndnoteFileController.create An error - did NOT store EndnoteFile object -*-'\n error_msg(ErrorCodes::VALIDATION_ERROR,\"#{I18n.t \"endnote_files.errors.create_error\"}\")\n render_json\n end\n end", "def create\n @ticket = Ticket.new(ticket_params)\n\n respond_to do |format|\n if @ticket.save\n params[:ticket_attachments]['image'].each do |i|\n @ticket_attachment = @ticket.ticket_attachments.create!(:image => i)\n end\n format.html { redirect_to @ticket, notice: 'Ticket was successfully created.' }\n format.json { render :show, status: :created, location: @ticket }\n else\n format.html { render :new }\n format.json { render json: @ticket.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @uploaded_file = UploadedFile.new(file_name: params[:uploaded_file][:file_name].original_filename)\n respond_to do |format|\n if @uploaded_file.save\n load_imported_items\n format.html { redirect_to root_path, notice: 'Uploaded file was successfully created.' }\n format.json { render :show, status: :created, location: @uploaded_file }\n else\n format.html { render :new }\n format.json { render json: @uploaded_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def upload\n @db=Database.find(params[:id])\n @files = params[:files]\n @files.each do |file|\n name = file.original_filename\n directory = @db.path\n path = File.join(directory, name)\n File.open(path, \"wb\") { |f| f.write(file.read) }\n end\n flash[:notice] = \"File uploaded\"\n respond_to do |format|\n format.html {redirect_to files_database_url(@db)}\n format.json { render json: @files }\n end\n end", "def upload_file(project_id, req_params)\n params = { query: [project_id, :upload], req: req_params }\n\n response = endpoint(name: 'Files', params: params).do_post\n\n process_id = response.content.dig('process', 'process_id')\n\n response.patch_endpoint_with endpoint(name: 'QueuedProcesses', params: { query: [project_id, process_id] })\n\n resource 'QueuedProcess', response\n end", "def db_add_pull_request_files(db, client, issues, org, repo)\n issues.each do |item|\n if(item.pull_request)\n begin\n files=client.pull_request_files(\"#{org}/#{repo}\", item.number.to_i)\n files.each do |file|\n if(db_pull_request_file_stored?(db, item.id, file.filename))\n db[\"DELETE FROM pull_request_files WHERE pull_request_id=? 
AND filename=?\", item.id.to_s, file.filename].delete\n end\n db[\n \"INSERT INTO pull_request_files (pull_request_id, filename, additions, deletions, changes, status)\n VALUES (?, ?, ?, ?, ?, ?)\",\n item.id, file.filename, file.additions, file.deletions, file.changes, file.status].insert\n end\n rescue Octokit::InternalServerError\n # 500 - Server Error: Sorry, there was a problem generating this diff. The repository may be missing relevant data. (Octokit::InternalServerError)\n # Skipping\n end\n end\n end\n end", "def create\n @upload_file_to_server = UploadFileToServer.new(upload_file_to_server_params)\n\n respond_to do |format|\n if @upload_file_to_server.save\n format.html { redirect_to @upload_file_to_server, notice: 'Upload file to server was successfully created.' }\n format.json { render :show, status: :created, location: @upload_file_to_server }\n else\n format.html { render :new }\n format.json { render json: @upload_file_to_server.errors, status: :unprocessable_entity }\n end\n end\n end", "def files_post(api_key, file, opts = {})\n data, _status_code, _headers = files_post_with_http_info(api_key, file, opts)\n return data\n end", "def create\n @datafile = Datafile.new(datafile_params)\n\n respond_to do |format|\n if @datafile.save\n format.html { redirect_to @datafile, notice: 'Datafile was successfully created.' }\n format.json { render action: 'show', status: :created, location: @datafile }\n else\n format.html { render action: 'new' }\n format.json { render json: @datafile.errors, status: :unprocessable_entity }\n end\n end\n end", "def archive_file_ids\n archive = File.open('drive_file_ids.json', 'w')\n File.write('drive_file_ids.json', @file_ids.to_json)\n archive.close\n end", "def create\n @qa_session_file = @qa_session.qa_session_files.create(qa_session_file_params)\n\n respond_to do |format|\n if @qa_session_file.save\n format.html { redirect_to @qa_session_file, notice: 'qa_session file was successfully created.' }\n format.json { render :show, status: :created, location: @qa_session_file }\n else\n format.html { render :new }\n format.json { render json: @qa_session_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @user_file = UserFile.new(user_file_params)\n @user_file.user = current_user\n respond_to do |format|\n if @user_file.save\n format.html { redirect_to @user_file, notice: \"User file was successfully created.\" }\n format.json { render :show, status: :created, location: @user_file }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @user_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def post(path = '/files/', params = {})\n request :post, path, params\n end", "def create\n @incident = Incident.new(incident_params)\n @incident.user = current_user\n\n respond_to do |format|\n if @incident.save\n new_incident_email(@incident)\n format.html { redirect_to @incident, notice: 'Incident was successfully created.' }\n format.json { render :show, status: :created, location: @incident }\n else\n format.html { render :new }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def upload(path)\n name = ::File.basename(path)\n data = ::File.read(path)\n _post_with_file(\"/files\", file: data, filename: name) { |json| json }\n end", "def create_upload_files(record)\n return unless record.mapper.respond_to?(:files)\n files_to_attach = record.mapper.files\n return [] if files_to_attach.nil? 
|| files_to_attach.empty?\n\n uploaded_file_ids = []\n files_to_attach.each do |filename|\n file = File.open(find_file_path(filename))\n uploaded_file = Hyrax::UploadedFile.create(user: depositor, file: file)\n uploaded_file_ids << uploaded_file.id\n file.close\n end\n uploaded_file_ids\n end", "def create\n @userfile = Userfile.new(userfile_params)\n\n respond_to do |format|\n if @userfile.save\n format.html { redirect_to @userfile, notice: 'Userfile was successfully created.' }\n format.json { render :show, status: :created, location: @userfile }\n else\n format.html { render :new }\n format.json { render json: @userfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n #@file_folder = FileFolder.new(file_folder_params)\n @folder = current_user.folders.where(\"id = ?\", params[:current_folder_id]).first\n @files = @folder.file_folders.build \n io_file = params[:file]\n respond_to do |format|\n if @files.store(io_file)\n @files = [] << @files\n format.js {render \"files_list\", status: 200}\n else\n format.js {render js: \"alert('Could not create file\", status: 400}\n end \n end \n end", "def create\n @workfile = Workfile.new(workfile_params)\n\n respond_to do |format|\n if @workfile.save\n format.html { redirect_to @workfile, notice: 'Workfile was successfully created.' }\n format.json { render :show, status: :created, location: @workfile }\n else\n format.html { render :new }\n format.json { render json: @workfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def parse_files_json(file)\n\n files_hash = convert_json(b2_list_file_names(file))\n files = {}\n\n files_hash[\"files\"].each do |file_hash|\n files[file_hash[\"fileName\"]] = file_hash[\"fileId\"]\n end\n\n return files\n\nend" ]
[ "0.64833826", "0.6448247", "0.6322326", "0.6237543", "0.62092316", "0.6165968", "0.5931547", "0.5913957", "0.586241", "0.58463055", "0.57442194", "0.57401264", "0.5737596", "0.56944656", "0.5692664", "0.56840324", "0.56753296", "0.56732726", "0.5669069", "0.5666979", "0.5664825", "0.564302", "0.56295305", "0.56126374", "0.56008744", "0.5597819", "0.5577892", "0.55595344", "0.5543802", "0.5536009", "0.55356157", "0.54897535", "0.5488653", "0.54717255", "0.5459497", "0.5444863", "0.54332536", "0.5415095", "0.5410159", "0.5402106", "0.53773487", "0.53628856", "0.5352349", "0.5344858", "0.5344502", "0.5343447", "0.5343447", "0.5330396", "0.53295386", "0.5328068", "0.53236496", "0.5322675", "0.53159225", "0.5297221", "0.5296174", "0.52898276", "0.52890044", "0.52856153", "0.52719104", "0.5269372", "0.52649075", "0.5261906", "0.5252719", "0.52472085", "0.52450335", "0.5243628", "0.5243477", "0.5238283", "0.5233712", "0.52313834", "0.5228375", "0.52194864", "0.5216434", "0.52122265", "0.5200421", "0.5194374", "0.5192171", "0.5187941", "0.5184089", "0.51804775", "0.51742905", "0.5171493", "0.51680547", "0.51675963", "0.5166287", "0.5165526", "0.5155253", "0.51521957", "0.5151484", "0.51494974", "0.51492286", "0.5144077", "0.51394063", "0.5136502", "0.51327443", "0.5131803", "0.5127086", "0.51266855", "0.5117737", "0.51157206" ]
0.7233455
0
PATCH/PUT /incidentfiles/1 PATCH/PUT /incidentfiles/1.json
def update respond_to do |format| if @incidentfile.update(incidentfile_params) format.html { redirect_to @incidentfile, notice: 'Incidentfile was successfully updated.' } format.json { render :show, status: :ok, location: @incidentfile } else format.html { render :edit } format.json { render json: @incidentfile.errors, status: :unprocessable_entity } end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update\n @treq = Treq.find(params[:id])\n\n respond_to do |format|\n unless params[:treq_files].blank?\n params[:treq_files]['file'].each do |a|\n @treq_file = @treq.treq_files.create!(:file => a, :treq_id => @treq.id)\n end\n end\n if @treq.update_attributes(params[:treq])\n format.html { redirect_to @treq, notice: 'Treq was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @treq.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @incident.update(incident_params)\n format.json { head :no_content }\n else\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def set_incidentfile\n @incidentfile = Incidentfile.find(params[:id])\n end", "def update\n respond_to do |format|\n if @incidentattachment.update(incidentattachment_params)\n format.html { redirect_to @incidentattachment, notice: 'Incidentattachment was successfully updated.' }\n format.json { render :show, status: :ok, location: @incidentattachment }\n else\n format.html { render :edit }\n format.json { render json: @incidentattachment.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @capa = Capa.find(params[:id])\n\n respond_to do |format|\n if @capa.update_attributes(params[:capa])\n unless params[:capa_files].blank?\n params[:capa_files]['file'].each do |a|\n @capa_file = @capa.capa_files.create!(:file => a, :capa_id => @capa.id)\n end\n end\n format.html { redirect_to @capa, notice: 'Capa was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @capa.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @ticket_file.update(ticket_file_params)\n format.html { redirect_to @ticket_file, notice: 'Ticket file was successfully updated.' }\n format.json { render :show, status: :ok, location: @ticket_file }\n else\n format.html { render :edit }\n format.json { render json: @ticket_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @event_subscription.update(event_subscription_params)\n @event_subscription.save\n\n file_params.each do |requirement|\n if(requirement[\"doc\"])\n requirement.symbolize_keys\n requirement[:doc].symbolize_keys\n path = \"data:#{requirement[:doc][:filetype]};base64, #{requirement[:doc][:base64]}\"\n Document.update(id: requirement[:doc][:id],\n user_id: @event_subscription.user_id,\n requirement_id: requirement[:id],\n state: \"pending_review\",\n path: path\n )\n end\n end\n render json: @event_subscription, status: :updated\n end", "def update\n @test_file = TestFile.find(params[:id])\n\n respond_to do |format|\n if @test_file.update_attributes(params[:test_file])\n format.html { redirect_to @test_file, notice: 'Test file was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @test_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @incident = Incident.find(params[:id])\n\n if @incident.update(incident_params)\n head :no_content\n else\n render json: @incident.errors, status: :unprocessable_entity\n end\n end", "def patch\n headers = {\"If-Match\" => @version}\n response = @context.request :patch, \"#{@path}/#{@id}\", @data.to_json, headers\n @version += 1\n response\n # 'X-HTTP-Method-Override' => 'PATCH'\n end", "def update\n @file_version = FileVersion.find(params[:id])\n params[:versioned_file_id] = @file_version.versioned_file_id\n if update_versioned_files? \n if @file_version.update(:isActive => true)\n head :no_content\n else\n render json: @file_version.errors, status: :unprocessable_entity\n end \n else \n render json: @file_version.errors, status: :unprocessable_entity\n end\n end", "def update\n respond_to do |format|\n if @incident.update(incident_params)\n format.html { redirect_to @incident, notice: 'Incident was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @resource_file = ResourceFile.find(params[:id])\n\n respond_to do |format|\n if @resource_file.update_attributes(params[:resource_file])\n format.html { redirect_to @resource_file, notice: 'Resource file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @resource_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @file_info = FileInfo.find(params[:id])\n\n respond_to do |format|\n if @file_info.update_attributes(params[:file_info])\n format.html { redirect_to @file_info, notice: 'File info was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @file_info.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @indexed_file = IndexedFile.find(params[:id])\n\n respond_to do |format|\n if @indexed_file.update_attributes(params[:indexed_file])\n format.html { redirect_to @indexed_file, notice: 'Indexed file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @indexed_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @testfile.update(testfile_params)\n format.html { redirect_to @testfile, notice: 'Testfile was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @testfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n error_msg(ErrorCodes::OBJECT_ERROR, \"#{I18n.t \"endnote_files.errors.not_found\"}: #{params[:id]}\")\n render_json\n end", "def update\n @super_file = SuperFile.find(params[:id])\n\n respond_to do |format|\n if @super_file.update_attributes(params[:super_file])\n format.html { redirect_to @super_file, notice: 'Super file was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @super_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @action_file = ActionFile.find(params[:id])\n\n respond_to do |format|\n if @action_file.update_attributes(params[:action_file])\n format.html { redirect_to @action_file, notice: 'Action file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @action_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def put(path = '/files/', params = {})\n request :put, path, params\n end", "def update\n @bulletin_file = BulletinFile.find(params[:id])\n\n respond_to do |format|\n if @bulletin_file.update_attributes(params[:bulletin_file])\n format.html { redirect_to @bulletin_file, notice: 'Bulletin file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @bulletin_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n upload = params.require(:file)\n handler = setup_handler(upload)\n\n if handler.valid?\n handler.call\n render json: {}, status: 202\n else\n render json: { errors: handler.errors }, status: 422\n end\n end", "def update\n respond_to do |format|\n if @file_example.update(file_example_params)\n format.html { redirect_to @file_example, notice: 'File example was successfully updated.' }\n format.json { render :show, status: :ok, location: @file_example }\n else\n format.html { render :edit }\n format.json { render json: @file_example.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @incident.update(incident_params)\n format.html { redirect_to @incident, notice: 'Incident was successfully updated.' }\n format.json { render :show, status: :ok, location: @incident }\n else\n format.html { render :edit }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @incident.update(incident_params)\n format.html { redirect_to @incident, notice: 'Incident was successfully updated.' }\n format.json { render :show, status: :ok, location: @incident }\n else\n format.html { render :edit }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @incident.update(incident_params)\n format.html { redirect_to @incident, notice: 'Incident was successfully updated.' }\n format.json { render :show, status: :ok, location: @incident }\n else\n format.html { render :edit }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @incident = Incident.find(params[:id])\n\n respond_to do |format|\n if @incident.update_attributes(params[:incident])\n format.html { redirect_to @incident, notice: 'Incident was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @incident = Incident.find(params[:id])\n\n respond_to do |format|\n if @incident.update_attributes(params[:incident])\n format.html { redirect_to @incident, notice: 'Incident was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @other_file.update(other_file_params)\n format.html { redirect_to @other_file, notice: 'Other file was successfully updated.' }\n format.json { render :show, status: :ok, location: @other_file }\n else\n format.html { render :edit }\n format.json { render json: @other_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @inciting_incident.update(inciting_incident_params)\n render json: @inciting_incident\n else\n render json: @inciting_incident.errors, status: :unprocessable_entity\n end\n end", "def update\n respond_to do |format|\n if @meeting.update(edit_params)\n @meeting.file.attach(params[:meeting][:my_file])\n format.html { redirect_to @meeting, notice: 'Meeting was successfully updated.' }\n format.json { render :show, status: :ok, location: @meeting }\n analyze\n else\n format.html { render :edit }\n format.json { render json: @meeting.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if params[:file]\n dirname = File.join('Data', 'Received_VcfFiles', @patient.case_id.to_s)\n dir = \"#{Rails.root}/#{dirname}\"\n FileUtils.mkdir(dir) unless File.directory?(dir)\n fname = params[:file].original_filename\n f = \"#{dir}/#{fname}\"\n vcf = UploadedVcfFile.find_by(patient_id: @patient.id, file_name: fname)\n if vcf.nil?\n FileUtils.cp_r(params[:file].tempfile.path, f)\n vcf = UploadedVcfFile.create(patient_id: @patient.id, file_name: fname, user_id: current_user.id)\n else\n # check if VCF file is different from the original one\n unless FileUtils.compare_file(f, params[:file].tempfile.path)\n vcf.updated_at = Time.now.to_datetime\n FileUtils.cp_r(params[:file].tempfile.path, f)\n end\n end\n vcf.save\n end\n if usi_params\n usi = UsiMaterialnr.find_or_create_by(patient_id: @patient.id)\n usi.usi_id = usi_params[:usi_id]\n usi.materialnr = usi_params[:materialnr]\n usi.save\n end\n respond_to do |format|\n if @patient.update(patient_params)\n format.html { redirect_to @patient, notice: 'Patient was successfully updated.' }\n format.json { render :show, status: :ok, location: @patient }\n else\n format.html { render :edit }\n format.json { render json: @patient.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @file_info.update(file_info_params)\n format.html { redirect_to @file_info, notice: 'File info was successfully updated.' }\n format.json { render :show, status: :ok, location: @file_info }\n else\n format.html { render :edit }\n format.json { render json: @file_info.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @datafile.update(datafile_params)\n format.html { redirect_to @datafile, notice: 'Datafile was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @datafile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @doc_file.update(doc_file_params)\n format.html { redirect_to @doc_file, notice: 'Doc file was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @doc_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n fn = params[:id].gsub('DOTDOT','.').gsub('SLASHSLASH','/')\n File.open(fn,'w+') { |f| \n f.puts params[:content]\n }\n respond_to do |format|\n format.json { render json: { success: true} }\n end\n end", "def update\n respond_to do |format|\n if @incident.update(incident_params)\n format.html { redirect_to incident_plans_path(@incident) }\n format.json { render :show, status: :ok, location: @incident }\n else\n format.html { redirect_to incident_plans_path(@incident) }\n format.json { render json: @incident.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @config_file = ConfigFile.find(params[:id])\n\n respond_to do |format|\n if @config_file.update_attributes(params[:config_file])\n format.html { redirect_to @config_file, notice: 'Config file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @config_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @userfile = Userfile.find(params[:id])\n\n respond_to do |format|\n if @userfile.update_attributes(params[:userfile])\n format.html { redirect_to @userfile, notice: 'Userfile was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @userfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update!(params)\n res = @client.put(path, nil, params, \"Content-Type\" => \"application/json\")\n @attributes = res.json if res.status == 201\n res\n end", "def update\n respond_to do |format|\n if @grid_fs_file.update(grid_fs_file_params)\n format.html { redirect_to @grid_fs_file, notice: 'Grid fs file was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @grid_fs_file }\n else\n format.html { render :edit }\n format.json { render json: @grid_fs_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_feature_request(folder_id, feature_content, file_name)\n url = URI(\"#{$base_url}/api/projects/#{$project_id}/folders/#{folder_id}/update_from_feature\")\n\n http = Net::HTTP.new(url.host, url.port)\n http.use_ssl = true\n request = Net::HTTP::Patch.new(url)\n request[\"accept\"] = 'application/vnd.api+json; version=1'\n request[\"access-token\"] = $access_token\n request[\"uid\"] = $uid\n request[\"client\"] = $client\n request[\"Content-Type\"] = 'application/json'\n\n data = {\n data: {\n attributes: {\n \"feature\": feature_content\n }\n }\n }\n\n request.body = JSON.generate(data)\n response = http.request(request)\n\n if response.code == 200.to_s\n update_response = JSON.parse(response.read_body)['data']\n puts \"Feature '#{update_response['attributes']['name']}' with '#{update_response['attributes']['scenarios-count']} scenario(s)' updated.\"\n $success_uploaded_count = $success_uploaded_count + 1\n $uploaded_features_list.push(file_name)\n $updated_count = $updated_count + 1\n else\n $fail_uploaded_count = $fail_uploaded_count + 1\n $not_uploaded_features_list.push(file_name)\n end\n\n response.code\nend", "def update\n @product_bulletin = ProductBulletin.find(params[:id])\n\n respond_to do |format|\n unless params[:bulletin_files].blank?\n params[:bulletin_files]['file'].each do |a|\n @bulletin_file = @product_bulletin.bulletin_files.create!(:file => a, :product_bulletin_id => @product_bulletin.id)\n end\n end\n if @product_bulletin.update_attributes(params[:product_bulletin])\n format.html { redirect_to @product_bulletin, notice: 'Product bulletin was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @product_bulletin.errors, status: :unprocessable_entity }\n end\n end\n end", "def multi_update\n errors = false\n return_value = []\n file_infos_params = params.permit(file_infos: [:id, :review_done, :component_id]).require(:file_infos)\n file_infos_params.each do |key, file_info_entry|\n (return_value << nil) and (errors = true) and next unless file_info_entry[:id]\n file_info = FileInfo.find(file_info_entry[:id])\n (return_value << nil) and (errors = true) and next unless file_info\n if file_info.update(file_info_entry)\n return_value << file_info_entry\n else\n return_value << file_info.errors\n errors = true\n end\n end\n respond_to do |format|\n format.json { render json: return_value }\n if errors\n format.html { redirect_to :back, notice: 'Some entries have errors'}\n else\n format.html { redirect_to :back }\n end\n end\n end", "def perform_study_file_upload(filename, study_file_params, study_id)\n file_upload = Rack::Test::UploadedFile.new(Rails.root.join('test', 'test_data', filename))\n study_file_params[:study_file].merge!(upload: file_upload)\n patch \"/single_cell/studies/#{study_id}/upload\", params: study_file_params, headers: {'Content-Type' => 'multipart/form-data'}\nend", "def update\n description = file_params[:description] || @file.description\n\n raise ApiError, \"Can't rename a file.\" unless @file.rename(file_params[:name], description)\n\n render json: @file, adapter: :json\n end", "def update\n respond_to do |format|\n if @casefile.update(casefile_params)\n format.html { redirect_to @casefile, notice: 'Casefile was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @casefile }\n else\n format.html { render :edit }\n format.json { render json: @casefile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @file_upload_attachment.update(file_upload_attachment_params)\n format.html { redirect_to @file_upload_attachment, notice: 'File upload attachment was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @file_upload_attachment.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @file_version = FileVersion.find(params[:id])\n\n respond_to do |format|\n if @file_version.update_attributes(params[:file_version])\n format.html { redirect_to @file_version, notice: 'File version was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @file_version.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @complaintfile.update(complaintfile_params)\n render json: @complaintfile, status: :ok\n else\n render json: @complaintfile.errors, status: :unprocessable_entity\n end\n\n #respond_to do |format|\n # if @complaintfile.update(complaintfile_params)\n # format.html { redirect_to @complaintfile, notice: 'Complaintfile was successfully updated.' }\n # format.json { render :show, status: :ok, location: @complaintfile }\n # else\n # format.html { render :edit }\n # format.json { render json: @complaintfile.errors, status: :unprocessable_entity }\n # end\n #end\n end", "def update\n @up_file = UpFile.find(params[:id])\n\n respond_to do |format|\n if @up_file.update_attributes(params[:up_file])\n format.html { redirect_to @up_file, notice: 'Up file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @up_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @batch_file.update(batch_file_params)\n format.html { redirect_to @batch_file, notice: \"Batch file was successfully updated.\" }\n format.json { render :show, status: :ok, location: @batch_file }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @batch_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @used_object.update(used_object_params)\n if params[:supporting_files]\n params[:supporting_files].each { |file| \n @used_object.supporting_files.create(file: file)\n }\n end\n format.html { redirect_to @used_object, notice: 'Used object was successfully updated.' }\n format.json { render :show, status: :ok, location: @used_object }\n else\n format.html { render :edit }\n format.json { render json: @used_object.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @fileversion = Fileversion.find(params[:id])\n\n respond_to do |format|\n if @fileversion.update_attributes(params[:fileversion])\n format.html { redirect_to @fileversion, notice: 'Fileversion was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @fileversion.errors, status: :unprocessable_entity }\n end\n end\n end", "def patch(path, **args); end", "def api_patch(path, data = {})\n api_request(:patch, path, :data => data)\n end", "def update\n respond_to do |format|\n if @agent_import_file.update_attributes(params[:agent_import_file])\n format.html { redirect_to @agent_import_file, :notice => t('controller.successfully_updated', :model => t('activerecord.models.agent_import_file')) }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @agent_import_file.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @uploaded_file = UploadedFile.find(params[:id])\n\n respond_to do |format|\n if @uploaded_file.update_attributes(params[:uploaded_file])\n format.html { redirect_to @uploaded_file, notice: 'Uploaded file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @uploaded_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n upload = params.require(:file)\n handler = create_handler(params[:id], upload)\n\n if handler.valid?\n render json: attachment_json(handler.call)\n else\n render json: errors_json(handler), status: :unprocessable_entity\n end\n end", "def update\n @contest = Contest.find_by(path: params[:id])\n\n respond_to do |format|\n if @contest.update_attributes(params[:contest])\n format.html { redirect_to contest_path(@contest.path)+'/upload' }\n #format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n #format.json { render json: @contest.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if params[:file]\n file = params[:file].read\n data = JSON.parse(file)\n ActiveRecord::Base.transaction do\n @patient = Patient.find_by(case_id: data['case_id'])\n if @patient.valid?\n @patient.update_json(data)\n name = params[:file].original_filename\n path = File.join(\"Data\", \"jsons\", name)\n File.open(path, \"wb\") { |f| f.write(file) }\n end\n end\n end\n if usi_params\n usi = UsiMaterialnr.find_or_create_by(patient_id:@patient.id)\n usi.usi_id = usi_params[:usi_id]\n usi.materialnr = usi_params[:materialnr]\n usi.save\n end\n respond_to do |format|\n if @patient.update(patient_params)\n format.html { redirect_to @patient, notice: 'Patient was successfully updated.' }\n format.json { render :show, status: :ok, location: @patient }\n else\n format.html { render :edit }\n format.json { render json: @patient.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @file_sequence = FileSequence.find(params[:id])\n\n respond_to do |format|\n if @file_sequence.update_attributes(file_sequence_params)\n format.html { redirect_to \"/file_sequences\", notice: 'File sequence was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @file_sequence.errors, status: :unprocessable_entity }\n end\n end\n end", "def patch(path, data)\n request 'PATCH', path, body: data.to_json\n end", "def update\n @complaint = Complaint.find(params[:id])\n\n updated = @complaint.update_attributes(params[:complaint])\n\n update_files if updated\n\n respond_to do |format|\n if updated\n format.html { redirect_to @complaint, notice: I18n.t('complaints.notice.update') }\n format.json { head :ok }\n else\n\t\t\t\t# to initialize the datetime fields\n\t\t\t\tgon.edit_complaint = true\n\t\t\t\tgon.violation_time = @complaint.violation_time.strftime('%m/%d/%Y %H:%M') if @complaint.violation_time\n\n format.html { render action: \"edit\" }\n format.json { render json: @complaint.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @attachment.update(attachment_params)\n format.html { redirect_to @attachment, notice: 'Attachment was successfully updated.' }\n format.json { render :show, status: :ok, location: @attachment }\n else\n format.html { render :edit }\n format.json { render json: @attaessay_file_namechment.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @file_upload.update(file_upload_params)\n format.html { redirect_to @file_upload, notice: 'File upload was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @file_upload.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @expenses_file.update(expenses_file_params)\n format.html { redirect_to @expenses_file, notice: 'Expenses file was successfully updated.' }\n format.json { render :show, status: :ok, location: @expenses_file }\n else\n format.html { render :edit }\n format.json { render json: @expenses_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @inventory_file.update_attributes(inventory_file_params)\n flash[:notice] = t('controller.successfully_updated', :model => t('activerecord.models.inventory_file'))\n format.html { redirect_to(@inventory_file) }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @inventory_file.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @datafile.update(datafile_params)\n format.html { redirect_to @datafile, notice: 'Datafile was successfully updated.' }\n format.json { render :show, status: :ok, location: @datafile }\n else\n format.html { render :edit }\n format.json { render json: @datafile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @bug = Bug.find(params[:id])\n Bug.save_file(params,@bug) if params[:keepold].nil? or params[:keepold].empty?\n updateparams = params\n updateparams[:bug][:fileuploadpath] = @bug.fileuploadpath\n respond_to do |format|\n if @bug.update_attributes(updateparams[:bug])\n format.html { redirect_to bugs_path, notice: 'Bug was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @bug.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @ufile = Ufile.find(params[:id])\n\n respond_to do |format|\n if @ufile.update_attributes(params[:ufile])\n format.html { redirect_to @ufile, notice: 'Ufile was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @ufile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_aos_version(args = {}) \n id = args['id']\n temp_path = \"/aosversions.json/{aosVersionId}\"\n path = temp_path\nargs.keys.each do |key|\n if (key == \"aosversionId\")\n args.delete(key)\n path = temp_path.gsub(\"{#{key}}\", id)\n end\nend\n puts \" PATH : #{path}\"\n put(path, args)\nend", "def update\n @asset_file = current_user.asset_files.find(params[:id]) \n\n respond_to do |format|\n if @asset_file.update(asset_file_params)\n format.html { redirect_to @asset_file, notice: 'File was successfully updated.' }\n format.json { render :show, status: :ok, location: @asset_file }\n else\n format.html { render :edit }\n format.json { render json: @asset_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @report_file = ReportFile.find(params[:id])\n\n respond_to do |format|\n if @report_file.update_attributes(params[:report_file])\n format.html { redirect_to @report_file, notice: 'Report file was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @report_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @nthfile.update(nthfile_params)\n format.html { redirect_to @nthfile, notice: 'Nthfile was successfully updated.' }\n format.json { render :show, status: :ok, location: @nthfile }\n else\n format.html { render :edit }\n format.json { render json: @nthfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_template_files(opts)\n template_id = opts.delete(:template_id)\n path = \"/template/update_files/#{template_id}\"\n prepare_files opts\n\n HelloSign::Resource::Template.new post(path, body: opts)\n end", "def update\n respond_to do |format|\n if @inventory_file.update(inventory_file_params)\n flash[:notice] = t('controller.successfully_updated', model: t('activerecord.models.inventory_file'))\n format.html { redirect_to(@inventory_file) }\n format.json { head :no_content }\n else\n prepare_options\n format.html { render action: \"edit\" }\n format.json { render json: @inventory_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @userfile.update(userfile_params)\n format.html { redirect_to @userfile, notice: 'Userfile was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @userfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n return if auth(\"website_administrator\")\n @incident = Incident.find(params[:id])\n\n respond_to do |format|\n if @incident.update_attributes(params[:incident])\n format.html { redirect_to @incident, :notice => 'Incident was successfully updated.' 
}\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @incident.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @incident = Incident.find(params[:id])\n\n respond_to do |format|\n if @incident.update_attributes(params[:incident])\n format.html { redirect_to(@incident) }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @incident.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @file_record = FileRecord.find(params[:id])\n\n respond_to do |format|\n if @file_record.update_attributes(params[:file_record])\n format.html { redirect_to @file_record, notice: 'File record was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @file_record.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @event_import_file.update_attributes(params[:event_import_file])\n flash[:notice] = t('controller.successfully_updated', :model => t('activerecord.models.event_import_file'))\n format.html { redirect_to(@event_import_file) }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @event_import_file.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @use_case = UseCase.find(params[:id])\n\n respond_to do |format|\n if @use_case.update_attributes(params[:use_case])\n\n if params[:use_case].has_key?(:file) && params[:use_case][:file]\n @use_case.upload_main_asset(params[:use_case][:file])\n elsif params.has_key?(:file) && params[:file]\n @use_case.upload_main_asset(params[:file])\n end\n \n format.html { redirect_to @use_case, notice: 'Use case was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @use_case.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @file.project_id = session[:project_id]\n respond_to do |format|\n if @file.update(file_params)\n format.html { render text: 'created', layout: false, status: :created }\n format.json { head :no_content }\n else\n params[:id] = @file.directory_id\n format.html { render action: 'edit', layout: false }\n format.json { render json: @file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @attachfile.update(attachfile_params)\n format.html { redirect_to @attachfile, notice: 'Attachfile was successfully updated.' }\n format.json { render :show, status: :ok, location: @attachfile }\n else\n format.html { render :edit }\n format.json { render json: @attachfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n uploaded_io = params[:file]\n if !uploaded_io.blank?\n extension=uploaded_io.original_filename.split('.')\n filename= \"#{Time.now.strftime('%Y%m%d%H%M%S')}.#{extension[-1]}\"\n File.open(Rails.root.join('public', 'upload','teachResources',filename), 'wb') do |file|\n file.write(uploaded_io.read)\n end\n params[:lab_teach_resource].merge!(:file=>\"/upload/teachResourcs/#{filename}\")\n end\n @lab_teach_resource = LabTeachResource.find(params[:id])\n\n respond_to do |format|\n if @lab_teach_resource.update_attributes(params[:lab_teach_resource])\n format.html { redirect_to @lab_teach_resource, notice: 'Lab teach resource was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @lab_teach_resource.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @agent_import_file.update(agent_import_file_params)\n if @agent_import_file.mode == 'import'\n AgentImportFileJob.perform_later(@agent_import_file)\n end\n format.html { redirect_to @agent_import_file, notice: t('controller.successfully_updated', model: t('activerecord.models.agent_import_file')) }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @agent_import_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @source_file.update(source_file_params)\n format.html { redirect_to @source_file, notice: 'Source file was successfully updated.' }\n format.json { render :show, status: :ok, location: @source_file }\n else\n format.html { render :edit }\n format.json { render json: @source_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @node_incident = NodeIncident.find(params[:id])\n\n respond_to do |format|\n if @node_incident.update_attributes(params[:node_incident])\n format.html { redirect_to @node_incident, :notice => 'Node incident was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @node_incident.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @asset_file.update(asset_file_params)\n format.html { redirect_to @asset_file, notice: 'Asset file was successfully updated.' }\n format.json { render :show, status: :ok, location: @asset_file }\n else\n format.html { render :edit }\n format.json { render json: @asset_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @workfile.update(workfile_params)\n format.html { redirect_to @workfile, notice: 'Workfile was successfully updated.' }\n format.json { render :show, status: :ok, location: @workfile }\n else\n format.html { render :edit }\n format.json { render json: @workfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @user_file = UserFile.find(params[:id])\n authorize! :update, @user_file\n respond_to do |format|\n if @user_file.update_attributes(params[:user_file])\n format.html { redirect_to @user_file, :notice => 'User file was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @user_file.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n unless admin_people_file_params[:title].blank?\n unless admin_people_file_params[:description].blank?\n unless admin_people_file_params[:file].blank?\n @admin_people_file.remove_file!\n end\n end\n end\n respond_to do |format|\n if @admin_people_file.update(admin_people_file_params)\n format.html { redirect_to @admin_people_file, notice: 'People file was successfully updated.' }\n format.json { render :show, status: :ok, location: @admin_people_file }\n else\n format.html { render :edit }\n format.json { render json: @admin_people_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @commit_filepath.update(commit_filepath_params)\n format.html { redirect_to @commit_filepath, notice: 'Commit filepath was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @commit_filepath }\n else\n format.html { render :edit }\n format.json { render json: @commit_filepath.errors, status: :unprocessable_entity }\n end\n end\n end", "def incidentfile_params\n params.require(:incidentfile).permit(:incident_id, :filetype, :state)\n end", "def update\n respond_to do |format|\n if @attachinaryfile.update(attachinaryfile_params)\n format.html { redirect_to new_mannequin_attachinary_file_path, notice: 'Attachinaryfile was successfully updated.' }\n format.json { render :show, status: :ok, location: @attachinaryfile }\n else\n format.html { render :edit }\n format.json { render json: @attachinaryfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @event_import_file.update_attributes(event_import_file_params)\n if @event_import_file.mode == 'import'\n Resque.enqueue(EventImportFileQueue, @event_import_file.id)\n end\n format.html { redirect_to @event_import_file, notice: t('controller.successfully_updated', model: t('activerecord.models.event_import_file')) }\n format.json { head :no_content }\n else\n prepare_options\n format.html { render action: \"edit\" }\n format.json { render json: @event_import_file.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @file_record.update(file_record_params)\n format.html { redirect_to @file_record, notice: \"File record was successfully updated.\" }\n format.json { render :show, status: :ok, location: @file_record }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @file_record.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @docfile.update(docfile_params)\n format.html { redirect_to @docfile, notice: \"Docfile was successfully updated.\" }\n format.json { render :show, status: :ok, location: @docfile }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @docfile.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @file_record.update(file_record_params)\n format.html { redirect_to @file_record, notice: 'File record was successfully updated.' }\n format.json { render :show, status: :ok, location: @file_record }\n else\n format.html { render :edit }\n format.json { render json: @file_record.errors, status: :unprocessable_entity }\n end\n end\n end" ]
[ "0.7109997", "0.6488839", "0.6424058", "0.6397617", "0.6268176", "0.6266148", "0.6218408", "0.62089175", "0.62015504", "0.6189543", "0.61234224", "0.60915154", "0.6084955", "0.60793304", "0.6070471", "0.60656935", "0.6038007", "0.60187334", "0.601735", "0.6000651", "0.5987279", "0.5980956", "0.5958554", "0.5934827", "0.5934827", "0.5934827", "0.5913746", "0.5913746", "0.59108186", "0.5897394", "0.58895415", "0.5871909", "0.5862654", "0.58609945", "0.58428085", "0.5833512", "0.58280706", "0.582776", "0.58265126", "0.58261955", "0.58256644", "0.58243537", "0.58066726", "0.5802758", "0.5793341", "0.57846683", "0.577694", "0.57731795", "0.5767877", "0.57666934", "0.5766612", "0.5765942", "0.5759075", "0.57506245", "0.57198167", "0.5703079", "0.56973255", "0.5696727", "0.5693382", "0.5688372", "0.56818134", "0.56806856", "0.56806177", "0.5674418", "0.5667489", "0.56576675", "0.565563", "0.5649349", "0.56477326", "0.5645167", "0.56437856", "0.56399864", "0.562684", "0.5621464", "0.5603501", "0.5597525", "0.5597412", "0.5594429", "0.5592822", "0.5580499", "0.55764866", "0.55761826", "0.5575913", "0.5561421", "0.55535513", "0.55468136", "0.5545814", "0.5539935", "0.5528954", "0.5523635", "0.55144346", "0.5513921", "0.5510641", "0.55039775", "0.5503445", "0.54997826", "0.5499292", "0.54930484", "0.5492516", "0.5491477" ]
0.7129484
0
DELETE /incidentfiles/1 DELETE /incidentfiles/1.json
def destroy @incidentfile.destroy respond_to do |format| format.html { redirect_to incidentfiles_url, notice: 'Incidentfile was successfully destroyed.' } format.json { head :no_content } end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy\n @incidentattachment.destroy\n respond_to do |format|\n format.html { redirect_to incidentattachments_url, notice: 'Incidentattachment was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def incident_delete(statuspage_id, incident_id)\n data = {}\n data['statuspage_id'] = statuspage_id\n data['incident_id'] = incident_id\n\n request :method => :post,\n :url => @url + 'incident/delete',\n :payload => data\n end", "def destroy\n @indexed_file = IndexedFile.find(params[:id])\n @indexed_file.destroy\n\n respond_to do |format|\n format.html { redirect_to indexed_files_url }\n format.json { head :no_content }\n end\n end", "def delete(container_name, file_name)\n validate_path_elements(container_name, file_name)\n\n client.request(\n method: :delete,\n path: \"#{container_name}/#{file_name}\",\n expected: 204\n )\n end", "def destroy\n @test_file = TestFile.find(params[:id])\n @test_file.destroy\n\n respond_to do |format|\n format.html { redirect_to test_files_url }\n format.json { head :no_content }\n end\n end", "def delete_file(path)\n \n puts \"Sending path via MCollective Files client\"\n @mc.delete(:path => path)\n printrpcstats\n \n end", "def destroy\n @bulletin_file = BulletinFile.find(params[:id])\n @bulletin_file.destroy\n\n respond_to do |format|\n format.html { redirect_to bulletin_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @action_file = ActionFile.find(params[:id])\n @action_file.destroy\n\n respond_to do |format|\n format.html { redirect_to action_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @agent_import_file.destroy\n\n respond_to do |format|\n format.html { redirect_to(agent_import_files_url) }\n format.json { head :no_content }\n end\n end", "def b2_delete_file(file)\n\n if parse_files_json(file) == {}\n\n puts \"File not present\"\n\n else\n \n result_hash = convert_json(b2_delete_file_version(file))\n\n if result_hash[\"fileName\"] == file\n puts \"File deleted successfully\"\n else\n puts \"Error deleting file\"\n end\n\n end\n\nend", "def destroy\n @file_info = FileInfo.find(params[:id])\n @file_info.destroy\n\n respond_to do |format|\n format.html { redirect_to file_infos_url }\n format.json { head :no_content }\n end\n end", "def delete\n unless FileDescriptor.exists?(filename: params[:fname])\n fpath = filePath params[:fname]\n begin\n File.delete fpath\n result = {status: 'ok'}\n status = 200\n rescue Exception => e\n result = {status: 'error', message: e.message}\n status = 500\n end\n else\n result = {status: 'error', message: 'File is open'}\n status = 403 # Forbidden\n end\n render json: result.to_json, status: status\n end", "def delete_file(filename,repo)\n curl_delete(\"#{self.host}/api2/repos/#{repo}/file/?p=#{filename}\").body_str\n end", "def destroy\n @resource_file = ResourceFile.find(params[:id])\n @resource_file.destroy\n\n respond_to do |format|\n format.html { redirect_to resource_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url }\n format.json { head :no_content }\n end\n end", "def deleteFileFromServer(filepath)\n filepath = filepath[1, filepath.length - 1] \n address = @@host + \"/user/\" + @@conf[\"username\"] + \"/device/\" + @@conf[\"dev_name\"] + \"/files/\" + filepath\n \n res = 
HttpRequest.new(:delete, address).send(@@host) \n puts res\n puts \"CODE: \" + res.code\n\nend", "def destroy\n @ticket_file.destroy\n respond_to do |format|\n format.html { redirect_to ticket_files_url, notice: 'Ticket file was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def delete(path = '/files/', params = {})\n request :delete, path, params\n end", "def delete(id)\n # Requires authorization\n raise PutioError::AuthorizationRequired if authentication_required!\n\n if id.is_a? Array then\n id = id.join(',')\n end\n\n make_post_call('/files/delete?file_ids=%s' % [id]).status == \"OK\"\n end", "def destroy\n @inventory_file.destroy\n\n respond_to do |format|\n format.html { redirect_to inventory_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @inventory_file.destroy\n\n respond_to do |format|\n format.html { redirect_to inventory_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @datafile.destroy\n respond_to do |format|\n format.html { redirect_to datafiles_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @file.destroy\n respond_to do |format|\n format.html { render text: 'created', layout: false, status: :created }\n format.json { head :no_content }\n end\n end", "def destroy\n @attachinaryfile.destroy\n respond_to do |format|\n format.html { redirect_to attachinaryfiles_url, notice: 'Attachinaryfile was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident = Incident.find(params[:id])\n @incident.destroy\n\n respond_to do |format|\n format.html { redirect_to incidents_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident = Incident.find(params[:id])\n @incident.destroy\n\n respond_to do |format|\n format.html { redirect_to incidents_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @event_import_file.destroy\n\n respond_to do |format|\n format.html { redirect_to(event_import_files_url) }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident = Incident.find(params[:id])\n @incident.destroy\n\n head :no_content\n end", "def destroy\n @file = current_user.past_exams.find(params[:id])\n\t\[email protected]\n respond_to do |format|\n format.html { redirect_to file_infos_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @event_import_file.destroy\n\n respond_to do |format|\n format.html { redirect_to event_import_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @expenses_file.destroy\n respond_to do |format|\n format.html { redirect_to expenses_files_url, notice: 'Expenses file was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @file_upload_attachment.destroy\n respond_to do |format|\n format.html { redirect_to file_upload_attachments_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @up_file = UpFile.find(params[:id])\n @up_file.destroy\n\n respond_to do |format|\n format.html { redirect_to up_files_url }\n format.json { head :no_content }\n end\n end", "def delete_files(uuids)\n Uploadcare::FileList.batch_delete(uuids)\n end", "def destroy\n @purchase_file = PurchaseFile.find(params[:id])\n @purchase_file.destroy\n\n respond_to do |format|\n format.html { redirect_to purchase_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @agent_import_file.destroy\n\n respond_to do |format|\n format.html { redirect_to agent_import_files_url, notice: t('controller.successfully_deleted', model: t('activerecord.models.agent_import_file')) }\n format.json { head :no_content }\n end\n end", "def deleteFile(bucket, file, client)\n\tfilename = File.basename(file)\n\tbegin\n\t \tresp = client.client.delete_objects({\n\t \t\tbucket: bucket,\n\t\t\tdelete: { objects: [\n\t\t\t\t{ key: filename }\n\t\t\t],\n\t\t\tquiet: false }\n\t\t})\n\trescue Exception => e\n\t\tputs \"Wrong file name\"\n\t\tputs e\n\t\texit\n\tend\n\treturn resp\nend", "def destroy\n @super_file = SuperFile.find(params[:id])\n @super_file.destroy\n\n respond_to do |format|\n format.html { redirect_to super_files_url }\n format.json { head :no_content }\n end\n end", "def delete(path)\n path = relativize_path path\n\n Precog.connect self do |http|\n uri = Addressable::URI.new\n uri.query_values = { :apiKey => api_key }\n\n http.delete \"/ingest/v#{VERSION}/fs/#{path}?#{uri.query}\"\n end\n end", "def destroy\n @fileversion = Fileversion.find(params[:id])\n @fileversion.destroy\n\n respond_to do |format|\n format.html { redirect_to fileversions_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @money_arrival_file.destroy\n respond_to do |format|\n format.html { redirect_to money_arrival_files_url, notice: 'Money arrival file was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def file_delete(path)\n params = {\n \"root\" => @root,\n \"path\" => format_path(path, false),\n }\n response = @session.do_post build_url(\"/fileops/delete\", params)\n parse_response(response)\n end", "def destroy\n @attachfile.destroy\n respond_to do |format|\n format.html { redirect_to attachfiles_url, notice: 'Attachfile was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n #Finds selected data file\n @data_file = DataFile.find(params[:id])\n #destroys data file\n @data_file.destroy\n respond_to do |format|\n format.html { redirect_to '/admin' }\n format.json { head :ok }\n end\n end", "def b2_delete_file_version(file)\n\n auth_hash = convert_json(b2_authorize_account)\n api_url = auth_hash[\"apiUrl\"]\n account_authorization_token = auth_hash[\"authorizationToken\"]\n\n file_hash = parse_files_json(file)\n file_name = file\n file_id = file_hash[file]\n\n uri = URI(\"#{api_url}/b2api/v1/b2_delete_file_version\")\n req = Net::HTTP::Post.new(uri)\n req.add_field(\"Authorization\",\"#{account_authorization_token}\")\n req.body = \"{\\\"fileName\\\":\\\"#{file_name}\\\", \\\"fileId\\\":\\\"#{file_id}\\\"}\"\n http = Net::HTTP.new(req.uri.host, req.uri.port)\n http.use_ssl = true\n res = http.start {|http| http.request(req)}\n\n case res\n when Net::HTTPSuccess then res.body\n when Net::HTTPRedirection then fetch(res['location'], limit - 1)\n else res.error!\n end\n\nend", "def destroy\n Track.destroy(params[:id])\n delete_file BSON::ObjectId(params[:id])\n\n respond_to do |format|\n format.html { redirect_to root_url }\n format.json { head :no_content }\n end\n end", "def delete_file(file_name)\n fail 'No Structure ID defined for structure. Can\\'t delete file' if @structure.id.nil?\n\n data = Hashie::Mash.new\n data.structure_id = @structure.id\n data.file_name = file_name\n\n push_file('api/remove_file', MultiJson.dump(data))\n end", "def destroy\n @file_record = FileRecord.find(params[:id])\n @file_record.destroy\n\n respond_to do |format|\n format.html { redirect_to file_records_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @file_version = FileVersion.find(params[:id])\n @file_version.destroy\n\n respond_to do |format|\n format.html { redirect_to file_versions_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @userfile = Userfile.find(params[:id])\n @userfile.destroy\n\n respond_to do |format|\n format.html { redirect_to uploads_url }\n format.json { head :no_content }\n end\n end", "def destroy\n #FIXME: Double check auth is working for deletion. Also, maybe should only delete if not associated with any experiments.\n @data_file.destroy\n \n respond_to do |format|\n format.html { redirect_to(data_files_path) }\n format.xml { head :ok }\n end\n end", "def delete\n client.delete(\"/#{id}\")\n end", "def destroy\n @grid_fs_file.destroy\n respond_to do |format|\n format.html { redirect_to grid_fs_files_url, notice: 'Grid fs file was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @file_sequence = FileSequence.find(params[:id])\n @file_sequence.destroy\n\n respond_to do |format|\n format.html { redirect_to file_sequences_url }\n format.json { head :no_content }\n end\n end", "def delete_one(file)\n files_collection.find(:_id => file.id).delete_one\n chunks_collection.find(:files_id => file.id).delete_many\n end", "def destroy\n @backup_file = BackupFile.find(params[:id])\n @backup_file.destroy\n\n respond_to do |format|\n format.html { redirect_to backup_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @cfile.destroy\n respond_to do |format|\n format.html { redirect_to :back }\n format.json { head :no_content }\n end\n end", "def destroy\n @report_file = ReportFile.find(params[:id])\n @report_file.destroy\n\n respond_to do |format|\n format.html { redirect_to report_files_url }\n format.json { head :no_content }\n end\n end", "def delete(file_path)\n file_name = File.basename(file_path)\n object = @bucket.objects[file_name]\n object.delete\n end", "def destroy\n @cfile.destroy\n respond_to do |format|\n format.html { redirect_to cfiles_url, notice: 'Cfile was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @ufile = Ufile.find(params[:id])\n @ufile.destroy\n\n respond_to do |format|\n format.html { redirect_to ufiles_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident = Incident.find(params[:id])\n @incident.destroy\n\n respond_to do |format|\n format.html { redirect_to(incidents_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @incident = Incident.find(params[:id])\n @incident.destroy\n\n respond_to do |format|\n format.html { redirect_to(incidents_url) }\n format.xml { head :ok }\n end\n end", "def test_del\n header 'Content-Type', 'application/json'\n\n data = File.read 'sample-traces/0.json'\n post('/traces', data, 'CONTENT_TYPE': 'application/json')\n\n id = last_response.body\n\n delete \"/traces/#{id}\"\n assert last_response.ok?\n\n get \"/traces/#{id}\"\n\n contents = JSON.parse last_response.body\n assert_kind_of(Hash, contents, 'Response contents is not a hash')\n assert contents.key? 'description'\n assert(!last_response.ok?)\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url, notice: 'Incident was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url, notice: 'Incident was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url, notice: 'Incident was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url, notice: 'Incident was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @incident.destroy\n respond_to do |format|\n format.html { redirect_to incidents_url, notice: 'Incident was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @commit_filepath.destroy\n respond_to do |format|\n format.html { redirect_to commit_filepaths_url, notice: 'Commit filepath was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @file_info.destroy\n respond_to do |format|\n format.html { redirect_to file_infos_url, notice: 'File info was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @file_upload.destroy\n respond_to do |format|\n format.html { redirect_to file_uploads_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @other_file.destroy\n respond_to do |format|\n format.html { redirect_to other_files_url, notice: 'Other file was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @datafile.destroy\n respond_to do |format|\n format.html { redirect_to datafiles_url, notice: 'Datafile was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @nthfile.destroy\n respond_to do |format|\n format.html { redirect_to nthfiles_url, notice: 'Nthfile was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @seo_file.destroy\n respond_to do |format|\n format.html { redirect_to seo_files_url, notice: 'Seo file was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @casefile.destroy\n respond_to do |format|\n format.html { redirect_to casefiles_url, notice: 'Casefile was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @config_file = ConfigFile.find(params[:id])\n @config_file.destroy\n\n respond_to do |format|\n format.html { redirect_to config_files_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @node_incident = NodeIncident.find(params[:id])\n @node_incident.destroy\n\n respond_to do |format|\n format.html { redirect_to node_incidents_url }\n format.json { head :no_content }\n end\n end", "def delete_json(path)\n url = [base_url, path].join\n resp = HTTParty.delete(url, headers: standard_headers)\n parse_json(url, resp)\n end", "def destroy\n @file_item = FileItem.cs(self.current_scope).find_by_path(params[:id])\n @file_item.destroy\n\n respond_to do |format|\n format.html { redirect_to file_items_url }\n format.json { head :ok }\n end\n end", "def destroy\n @userfile.destroy\n\n fileName = Rails.root.join('public','uploads', @userfile.file_name_slug)\n File.delete(fileName) if File.exist?(fileName)\n\n respond_to do |format|\n format.html { redirect_to user_mycontact_meeting_userfiles_path }\n format.json { head :no_content }\n end\n end", "def destroy\n self.class.mongo_client.database.fs.find(:_id=>BSON::ObjectId.from_string(@id)).delete_one\n end", "def destroy\n self.class.mongo_client.database.fs.find(:_id=>BSON::ObjectId.from_string(@id)).delete_one\n end", "def delete(uuid, key)\n request(method: 'DELETE', uri: \"/files/#{uuid}/metadata/#{key}/\")\n end", "def destroy\n return if auth(\"website_administrator\")\n @incident = Incident.find(params[:id])\n @incident.destroy\n\n respond_to do |format|\n format.html { redirect_to incidents_url }\n format.json { head :ok }\n end\n end", "def destroy\n @file_example.destroy\n respond_to do |format|\n format.html { redirect_to file_examples_url, notice: 'File example was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @file_attachment = FileAttachment.find(params[:id])\n @test_case = @file_attachment.test_case\n @file_attachment.destroy\n\n respond_to do |format|\n format.html { redirect_to(@test_case || file_attachments_path) }\n format.xml { head :ok }\n end\n end", "def destroy\n @subject_file.destroy\n respond_to do |format|\n format.html { redirect_to subject_files_path(:subject => \"all\"), notice: 'Subject file was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def delete(path, opts = {})\n input_json = {\n path: path,\n }\n response = @session.do_rpc_endpoint(\"/#{ @namespace }/delete\", input_json)\n Dropbox::API::File.from_json(Dropbox::API::HTTP.parse_rpc_response(response))\n end", "def delete(path)\n RestClient.delete request_base+path\n end", "def destroy\n \tself.class.mongo_client.database.fs.find(:_id => BSON::ObjectId(@id)).delete_one\n end", "def destroy\n @feefile = Feefile.find(params[:id])\n directory= \"uploads\"\n path =File.join(directory,@feefile.feefilename)\n File.delete(path)\n @feefile.destroy\n \n\n respond_to do |format|\n format.html { redirect_to(feefiles_url) }\n format.xml { head :ok }\n end\n end", "def delete path\n make_request(path, \"delete\", {})\n end", "def delete_file(file)\n delete_attachment(file)\n end", "def destroy\n @history_file_delete.destroy\n respond_to do |format|\n format.html { redirect_to history_file_deletes_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @testfile.destroy\n respond_to do |format|\n format.html { redirect_to materials_path }\n format.json { head :no_content }\n end\n end", "def delete_service_files(resource)\n file get_service_script_name(resource) do\n action :delete\n only_if { get_service_script_name != nil }\n end\nend", "def destroy\n @study_file.destroy\n respond_to do |format|\n format.html { redirect_to study_files_url, notice: 'Study file was successfully destroyed.' }\n format.json { head :no_content }\n end\n end" ]
[ "0.71035445", "0.70188427", "0.6959227", "0.6935799", "0.6914682", "0.68984425", "0.68955624", "0.6882368", "0.6856593", "0.6834718", "0.6815744", "0.6796455", "0.679489", "0.6792165", "0.67862576", "0.67862576", "0.6785758", "0.67519444", "0.67402273", "0.67349565", "0.67213136", "0.67213136", "0.6718989", "0.6715831", "0.671389", "0.6708987", "0.6708987", "0.67077684", "0.6687346", "0.6685449", "0.6679872", "0.66733915", "0.66665846", "0.66471195", "0.66419226", "0.66414684", "0.66348946", "0.6633519", "0.6629797", "0.66260237", "0.66245306", "0.66201997", "0.6600891", "0.659941", "0.6597377", "0.65835476", "0.657568", "0.65731174", "0.6567043", "0.65662336", "0.6566045", "0.6562219", "0.65604526", "0.65560764", "0.6555352", "0.65535915", "0.6553359", "0.65406597", "0.65405244", "0.65390974", "0.65366113", "0.6518793", "0.6517832", "0.6517832", "0.6517552", "0.6515", "0.6515", "0.6515", "0.6515", "0.6515", "0.65091115", "0.6502781", "0.6499409", "0.6495568", "0.6491033", "0.64899987", "0.6486661", "0.6479596", "0.6477798", "0.647436", "0.6473912", "0.6472425", "0.64606875", "0.64557934", "0.64557934", "0.64549303", "0.64501345", "0.64480287", "0.644565", "0.64446247", "0.6440831", "0.6440335", "0.6439514", "0.6439107", "0.64381444", "0.6436195", "0.6434248", "0.6433006", "0.6430205", "0.64297116" ]
0.777575
0
Use callbacks to share common setup or constraints between actions.
def set_incidentfile @incidentfile = Incidentfile.find(params[:id]) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_required_actions\n # TODO: check what fields change to asign required fields\n end", "def action_hook; end", "def run_actions; end", "def define_action_hook; end", "def actions; end", "def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end", "def add_actions; end", "def callbacks; end", "def callbacks; end", "def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end", "def define_action_helpers; end", "def post_setup\n end", "def action_methods; end", "def action_methods; end", "def action_methods; end", "def before_setup; end", "def action_run\n end", "def execute(setup)\n @action.call(setup)\n end", "def define_action_helpers?; end", "def set_actions\n actions :all\n end", "def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end", "def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end", "def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end", "def before_actions(*logic)\n self.before_actions = logic\n end", "def setup_handler\n end", "def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end", "def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? 
}\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end", "def action; end", "def action; end", "def action; end", "def action; end", "def action; end", "def workflow\n end", "def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end", "def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end", "def before(action)\n invoke_callbacks *self.class.send(action).before\n end", "def process_action(...)\n send_action(...)\n end", "def before_dispatch(env); end", "def after_actions(*logic)\n self.after_actions = logic\n end", "def setup\n # override and do something appropriate\n end", "def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end", "def setup(_context)\n end", "def setup(resources) ; end", "def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end", "def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end", "def determine_valid_action\n\n end", "def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end", "def startcompany(action)\n @done = true\n action.setup\n end", "def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end", "def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end", "def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end", "def define_tasks\n define_weave_task\n connect_common_tasks\n end", "def setup(&block)\n define_method(:setup, &block)\n end", "def setup\n transition_to(:setup)\n end", "def setup\n transition_to(:setup)\n end", "def action\n end", "def setup( *args 
)\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend", "def config(action, *args); end", "def setup\n @setup_proc.call(self) if @setup_proc\n end", "def before_action \n end", "def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end", "def action\n end", "def matt_custom_action_begin(label); end", "def setup\n # override this if needed\n end", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end", "def after(action)\n invoke_callbacks *options_for(action).after\n end", "def pre_task\n end", "def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end", "def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end", "def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end", "def setup_signals; end", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! 
: action\n end", "def initialize(*args)\n super\n @action = :set\nend", "def after_set_callback; end", "def setup\n #implement in subclass;\n end", "def lookup_action; end", "def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end", "def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end", "def release_actions; end", "def around_hooks; end", "def save_action; end", "def setup(easy)\n super\n easy.customrequest = @verb\n end", "def action_target()\n \n end", "def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end", "def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end", "def before_setup\n # do nothing by default\n end", "def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end", "def default_action; end", "def setup(&blk)\n @setup_block = blk\n end", "def callback_phase\n super\n end", "def advice\n end", "def _handle_action_missing(*args); end", "def duas1(action)\n action.call\n action.call\nend", "def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end", "def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end", "def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend" ]
[ "0.6163163", "0.6045976", "0.5946146", "0.591683", "0.5890051", "0.58349305", "0.5776858", "0.5703237", "0.5703237", "0.5652805", "0.5621621", "0.54210985", "0.5411113", "0.5411113", "0.5411113", "0.5391541", "0.53794575", "0.5357573", "0.53402257", "0.53394014", "0.53321576", "0.53124547", "0.529654", "0.5296262", "0.52952296", "0.52600986", "0.52442724", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.5232394", "0.523231", "0.5227454", "0.52226824", "0.52201617", "0.5212327", "0.52079266", "0.52050185", "0.51754695", "0.51726824", "0.51710224", "0.5166172", "0.5159343", "0.51578903", "0.51522785", "0.5152022", "0.51518047", "0.51456624", "0.51398855", "0.5133759", "0.5112076", "0.5111866", "0.5111866", "0.5110294", "0.5106169", "0.509231", "0.50873137", "0.5081088", "0.508059", "0.50677156", "0.50562143", "0.5050554", "0.50474834", "0.50474834", "0.5036181", "0.5026331", "0.5022976", "0.5015441", "0.50121695", "0.5000944", "0.5000019", "0.4996878", "0.4989888", "0.4989888", "0.49864885", "0.49797225", "0.49785787", "0.4976161", "0.49683493", "0.4965126", "0.4958034", "0.49559742", "0.4954353", "0.49535993", "0.4952725", "0.49467874", "0.49423352", "0.49325448", "0.49282882", "0.49269363", "0.49269104", "0.49252945", "0.4923091", "0.49194667", "0.49174926", "0.49173003", "0.49171105", "0.4915879", "0.49155936" ]
0.0
-1
Never trust parameters from the scary internet, only allow the white list through.
def incidentfile_params params.require(:incidentfile).permit(:incident_id, :filetype, :state) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_params\n params.require(:user).permit(param_whitelist)\n end", "def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end", "def allow_params_authentication!; end", "def allowed_params\n ALLOWED_PARAMS\n end", "def default_param_whitelist\n [\"mode\"]\n end", "def param_whitelist\n [:role, :title]\n end", "def expected_permitted_parameter_names; end", "def safe_params\n params.except(:host, :port, :protocol).permit!\n end", "def strong_params\n params.require(:team_member).permit(param_whitelist)\n end", "def permitir_parametros\n \t\tparams.permit!\n \tend", "def strong_params\n params.require(:community).permit(param_whitelist)\n end", "def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end", "def strong_params\n params.require(:education).permit(param_whitelist)\n end", "def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end", "def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end", "def param_whitelist\n [:rating, :review]\n end", "def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end", "def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end", "def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end", "def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end", "def valid_params_request?; end", "def strong_params\n params.require(:experience).permit(param_whitelist)\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? 
key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end", "def allowed_params\n params.require(:allowed).permit(:email)\n end", "def permitted_params\n []\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end", "def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend", "def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end", "def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end", "def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end", "def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end", "def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end", "def safe_params\n params.require(:user).permit(:name)\n end", "def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend", "def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end", "def check_params; true; end", "def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end", "def quote_params\n params.permit!\n end", "def valid_params?; end", "def paramunold_params\n params.require(:paramunold).permit!\n end", "def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend", "def filtered_parameters; end", "def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end", "def filtering_params\n params.permit(:email, :name)\n end", "def check_params\n true\n end", "def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end", "def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def allowed_params\n 
params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend", "def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend", "def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end", "def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end", "def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end", "def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend", "def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end", "def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end", "def active_code_params\n params[:active_code].permit\n end", "def filtering_params\n params.permit(:email)\n end", "def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end", "def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end", "def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end", "def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end", "def post_params\n if current_user.admin? 
\n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end", "def list_params\n params.permit(:name)\n end", "def filter_parameters; end", "def filter_parameters; end", "def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end", "def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end", "def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end", "def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end", "def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end", "def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end", "def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end", "def url_whitelist; end", "def admin_social_network_params\n params.require(:social_network).permit!\n end", "def filter_params\n params.require(:filters).permit(:letters)\n end", "def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end", "def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end", "def sensitive_params=(params)\n @sensitive_params = params\n end", "def permit_request_params\n params.permit(:address)\n end", "def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end", "def secure_params\n params.require(:location).permit(:name)\n end", "def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end", "def question_params\n params.require(:survey_question).permit(question_whitelist)\n end", "def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end", "def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end", "def maintenance_request_params\n params[:maintenance_request].permit! 
#allow all parameters for now\n end", "def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end", "def url_params\n params[:url].permit(:full)\n end", "def backend_user_params\n params.permit!\n end", "def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend", "def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end", "def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end", "def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end", "def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end", "def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end", "def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end", "def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end", "def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end" ]
[ "0.69792545", "0.6781151", "0.67419964", "0.674013", "0.6734356", "0.6591046", "0.6502396", "0.6496313", "0.6480641", "0.6477825", "0.64565", "0.6438387", "0.63791263", "0.63740575", "0.6364131", "0.63192815", "0.62991166", "0.62978333", "0.6292148", "0.6290449", "0.6290076", "0.62894756", "0.6283177", "0.6242471", "0.62382483", "0.6217549", "0.6214457", "0.6209053", "0.6193042", "0.6177802", "0.6174604", "0.61714715", "0.6161512", "0.6151757", "0.6150663", "0.61461", "0.61213595", "0.611406", "0.6106206", "0.6105114", "0.6089039", "0.6081015", "0.6071004", "0.60620916", "0.6019971", "0.601788", "0.6011056", "0.6010898", "0.6005122", "0.6005122", "0.6001556", "0.6001049", "0.59943926", "0.5992201", "0.59909594", "0.5990628", "0.5980841", "0.59669393", "0.59589154", "0.5958826", "0.5957911", "0.5957385", "0.5953072", "0.59526145", "0.5943361", "0.59386164", "0.59375334", "0.59375334", "0.5933856", "0.59292704", "0.59254247", "0.5924164", "0.59167904", "0.59088355", "0.5907542", "0.59064597", "0.5906243", "0.5898226", "0.589687", "0.5896091", "0.5894501", "0.5894289", "0.5891739", "0.58860534", "0.5882406", "0.587974", "0.58738774", "0.5869024", "0.58679986", "0.5867561", "0.5865932", "0.5864461", "0.58639693", "0.58617616", "0.5861436", "0.5860451", "0.58602303", "0.5854586", "0.58537364", "0.5850427", "0.5850199" ]
0.0
-1
=begin car ship plane train ["car", "ship", "plane", "submarine", "train"] =end
def next2 vehicles = ["car", "ship", "plane", "submarine", "train"] i = 0 while i < vehicles.size v = vehicles[i] i += 1 next if v.length == 5 puts v end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pariculars\n\t\t[\"Concentration\", \"Concept\", \"Participation\", \"Completion of Task\"]\n\tend", "def arith(b,opt=\"M\") return \"@SP\\nAM=M-1\\nD=M\\nA=A-1\\n\"+opt+\"=M\"+b+\"D\\n\" end", "def draw_me_a_train\n puts %Q{ .---- - -\n ( ,----- - -\n \\_/ ___\n c--U---^--'o [_\n |------------'_| MTA ROUTE OPTIMIZER\n /_(o)(o)--(o)(o) *******************\n ~ ~~~~~~~~~~~~~~~~~~~~~~~~\n }\nend", "def |(parslet); end", "def |(parslet); end", "def setup\n # 原子のないセル。\n vectors00 = [[2.0, 2.0, 2.0], [0.0, 2.0, 2.0], [0.0, 0.0, 2.0]]\n axes00 = CrystalCell::LatticeAxes.new([[2.0, 2.0, 2.0], [0.0, 2.0, 2.0], [0.0, 0.0, 2.0]])\n @c00 = CrystalCell::Cell.new(axes00)\n @c00.comment = 'c00'\n\n # 元素の識別子を数字にしたもの。\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 1, [0.1, 0.2, 0.3] ),\n ]\n @c01 = CrystalCell::Cell.new(axes00, atoms)\n @c01.comment = 'c01'\n\n # Li と O を1つずつ入れたセル。\n # @c02 = CrystalCell::Cell.new( [ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0] ] )\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0], 'Li1' ),\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2], 'O1' ),\n ]\n @c02 = CrystalCell::Cell.new(vectors00, atoms)\n @c02.comment = 'c02'\n\n # 原子の順序を逆にしたもの。\n atoms = [\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2] ),\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0] ),\n ]\n @c03 = CrystalCell::Cell.new(vectors00, atoms)\n @c03.comment = 'c03'\n\n # 原子の順序がいりまじったもの\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2] ),\n CrystalCell::Atom.new( 'Li', [0.1, 0.2, 0.3] ),\n ]\n @c04 = CrystalCell::Cell.new(vectors00, atoms)\n @c04.comment = 'c04'\n\n # 原子が不足しているもの。\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0] ),\n ]\n @c05 = CrystalCell::Cell.new(vectors00, atoms)\n @c05.comment = 'c05'\n\n # Selective dynamics をいれたもの。\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0], nil, [true, false, false ] ),\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2] ),\n ]\n @c06 = CrystalCell::Cell.new(vectors00, atoms)\n @c06.comment = 'c06'\n\n # 元素の識別子を数字にしたもの。\n atoms = [\n CrystalCell::Atom.new( 0, Mageo::Vector3DInternal[0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 1, Mageo::Vector3DInternal[0.2, 0.2, 0.2] ),\n ]\n @c07 = CrystalCell::Cell.new(vectors00, atoms)\n @c07.comment = 'c07'\n\n # セル外の座標の原子を追加。\n atoms = [\n CrystalCell::Atom.new( 'Li', [ 1.2, 3.4, 5.6], \"atom0\", [ false, false, true] ),\n CrystalCell::Atom.new( 'O', [-1.2, -3.4, -5.6] ),\n ]\n @c08 = CrystalCell::Cell.new(vectors00, atoms)\n @c08.comment = 'c08'\n\n #cubic\n axes = CrystalCell::LatticeAxes.new(\n [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] )\n ]\n @c10 = CrystalCell::Cell.new( axes, atoms)\n @c10.comment = 'cubic'\n\n #hexagonal\n axes = CrystalCell::LatticeAxes.new(\n [\n [0.86602540378443864676, 0.5, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] )\n ]\n @c11 = CrystalCell::Cell.new( axes, atoms)\n @c11.comment = 'hexagonal'\n\n #monoclinic\n axes = CrystalCell::LatticeAxes.new(\n [ [1.5, 1.4, 0.0],\n [0.0, 1.2, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0]),\n #CrystalCell::Atom.new( 0, [0.1, 0.2, 0.3]),\n #CrystalCell::Atom.new( 0, [0.2, 0.3, 0.4]),\n ]\n @c12 = CrystalCell::Cell.new( axes, atoms)\n @c12.comment = 'monoclinic'\n\n #orthorhombic\n axes = CrystalCell::LatticeAxes.new(\n [\n [3.0, 0.0, 0.0],\n [0.0, 2.0, 0.0],\n 
[0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0])\n ]\n @c13 = CrystalCell::Cell.new( axes, atoms)\n @c13.comment = 'orthorhombic'\n\n #tetragonal\n axes = CrystalCell::LatticeAxes.new(\n [\n [2.0, 0.0, 0.0],\n [0.0, 2.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0], )\n ]\n @c14 = CrystalCell::Cell.new( axes, atoms)\n @c14.comment = 'tetragonal'\n\n #tetragonal-b\n axes = CrystalCell::LatticeAxes.new(\n [\n [2.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 2.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0], )\n ]\n @c14b = CrystalCell::Cell.new( axes, atoms)\n @c14b.comment = 'tetragonal-b'\n\n #triclinic\n axes = CrystalCell::LatticeAxes.new(\n [\n [1.5, 1.4, 1.3],\n [0.0, 1.2, 1.1],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 0, [0.1, 0.2, 0.3] ),\n CrystalCell::Atom.new( 0, [0.2, 0.3, 0.4] ),\n ]\n @c15 = CrystalCell::Cell.new( axes, atoms)\n @c15.comment = 'triclinic'\n\n #trigonal\n axes = CrystalCell::LatticeAxes.new(\n [\n [0.86602540378443864676, 0.5, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0]),\n CrystalCell::Atom.new( 0, [0.333333333333333, 0.333333333333333, 0.333333333333333] ),\n ]\n @c16 = CrystalCell::Cell.new( axes, atoms)\n @c16.comment = 'trigonal'\n end", "def generate\n horizontal_words + vertical_words\n end", "def above_fold_terms\n [:maker,\n :date_original,\n :date_published,\n :resource_type,\n :genre_string,\n :identifier,\n :rights,\n ]\n end", "def mta\n {\n :N => [\"Times Square\", \"34th\", \"28th\", \"23rd\", \"Union Square\", \"8th\"], \n :L => [\"8th\", \"6th\", \"Union Square\", \"3rd\", \"1st\"],\n :L6 => [\"Grand Station\", \"33rd\", \"28th\", \"23rd\", \"Union Square\", \"Astor Place\"]\n }\nend", "def subway\n{\n \"n\" => [\"Times Square\", \"34th\", \"28th\", \"23rd\", \"Union Square\", \"8th\"],\n \"l\" => [\"8th\", \"6th\", \"Union Square\", \"3rd\", \"1st\"],\n \"six\" => [\"Grand Central\", \"33rd\", \"28th\", \"23rd\", \"Union Square\", \"Astor Place\"]\n }\nend", "def layover_codes\n %w[ATL ORD DFW DEN]\n end", "def sale\r\nshirts = [\"Iron Man\", \"Thor\", \"Hulk\", \"Captain America\"]\r\n\r\nend", "def mta\n {\n\t:line_N => [ \"Times Square\", \"34th\", \"28th\", \"23rd\", \"Union Square\", \"8th\" ],\n\t:line_L => [ \"8th\", \"6th\", \"Union Square\", \"3rd\", \"1st\" ],\n\t:line_6 => [ \"Grand Central\", \"33rd\", \"28th\", \"23rd\", \"Union Square\", \"Astor Place\" ]\n }\nend", "def build_bag\n [:red] * 3 + [:blue] * 4\nend", "def direction(compass, trainlist)\n nsew = []\n\n trainlist.each do |train|\n if train[:direction] == compass\n nsew << train[:train]\n end\n end\n return nsew\nend", "def roads_and_libraries(n, c_lib, c_road, cities)\n \nend", "def cigaret_tar\n Unitwise(0.00001, 'kilogram')\n end", "def testComprehension\n=begin\n yin = proc do |x|\n Transcript.cr\n x\n end.call(Continuation.current)\n\n yang = proc do |x|\n Transcript.nextPut('*')\n x\n end.call(Continuation.current)\n\n yin.call(yang)\n=end\n end", "def start\n years\n strings\n sgb\n end", "def placement\n\t\t[\n\t ['Champ', 16],\n\t ['Runner-up', 12],\n\t ['Third', 10],\n\t ['Fourth', 9],\n\t ['Fifth', 7],\n\t ['Sixth', 6],\n\t ['Seventh', 4],\n\t ['Eighth', 3]\n\t]\n\n\tend", "def plan_trip (first_l, first_s, last_l, last_s)\n# Get the program to work for a single line:\n# Different way to do global use $\n stations = [ ]\n start = 
$train_lines[first_l.to_s].index(first_s.to_s)\n finish = $train_lines[last_l.to_s].index(last_s.to_s)\n\n# 2.7.2 :012 > $train_lines.values\n# => [[\"Times Square\", \"34th\", \"28th\", \"23rd\", \"Union Square\", \"8th\"], [\"8th\", \"6th\", \"Union Square\", \"3rd\", \"1st\"], [\"Grand Central\", \"33rd\", \"28th\", \"23rd\", \"Union Square\", \"Astor Place\"]]\n# 2.7.2 :013 > $train_lines.keys\n# => [\"lineN\", \"lineL\", \"line6\"]\n\n if start < finish\n stations = $lineN[start..finish]\n elsif\n stations = $lineN[finish..start].reverse\n end\n\n return stations\n\nend", "def tab_carrot(l)\n \"^\" * l\nend", "def victoire\n \n [[@a1, @a2, @a3],\n [@a1, @b2, @c3],\n [@a1, @b1, @c1],\n [@b1, @b2, @b3],\n [@c1, @c2, @c3],\n [@c1, @b2, @a3],\n [@a2, @b2, @c2],\n [@a3, @b3, @c3]]\n end", "def test_verse_range_and_separated_verse\n text = 'Ruth 2,1-3.11'\n t1, t2 = text.split(dot)\n assert_formated_text_for_ast text, [pass(text: t1, b1: :Ruth, c1: 2, v1: 1, b2: :Ruth, c2: 2, v2: 3), dot, pass(text: t2, b1: :Ruth, c1: 2, v1: 11, b2: :Ruth, c2: 2, v2: 11)]\n end", "def label_and_parafilm\n show do \n title \"Label and Parafilm\"\n \n plates_to_parafilm = operations.reject { |op| op.temporary[:delete] || op.temporary[:re_incubate] }.map { |op| op.input(\"Plate\").item.id }\n note \"Perform the steps with the following plates: #{plates_to_parafilm.join(\",\")}\"\n note \"Label the plates with their item ID numbers on the side, and parafilm each one.\"\n note \"Labelling the plates on the side makes it easier to retrieve them from the fridge.\"\n end\n end", "def init_rubies\n @table = [\n [1, 1], # Enumerable Canyon ==> 0\n [1, 1], # Monkey Patch City ==> 1\n [2, 2], # Duck Type Beach ==> 2\n [3, 0], # Matzburg ==> 3\n [0, 3], # Nil Town ==> 4\n [2, 2], # Hash Crossing ==> 5\n [2, 2] # Dynamic Palisades ==> 6\n ]\n end", "def legionnairs\n\n end", "def car(brand, model)\n brand + ' ' + model\nend", "def wl_formula\n replace_nests do |i|\n # indices in Wolfram Language start with 1\n \"##{i + 1}\"\n end\n end", "def x______________STRUCTURE\r\nend", "def x______________STRUCTURE\r\nend", "def construct(w)\n @word = \"~#{w}\" # Assimilate!\n @length = @word.length - 1 # Do not count the ~.\n @back = Array.new\n @back << 0 \n @back << 0\n s = 0\n (2..@length).each do |i|\n s = step(s,@word[i - 1])\n @back << s\n end\n end", "def partial_instruction_of(splits)\n splits.inject('') do |string, sub_range|\n \"#{string}#{head_of(sub_range)};\"\n end\n end", "def termsig(*) end", "def lcts(array)\nend", "def print_combos(vector)\n \nend", "def create_sample_controlled_vocab_terms_attributes(array)\n attributes = []\n array.each do |type|\n attributes << { label: type }\n end\n attributes\nend", "def pan_body()\n self.zip((?a..?z).to_a).collect do |n, c|\n sprintf(\" %s%s %c%s\", \\\n (c==?z ? \"and \" : \"\"), \\\n n.to_en, c, \\\n (n>1) ? \"'s\" : \"\")\n end\n end", "def ct(t,w) u=t.length;r=w-(l=w/2-u/2)-u;' '*l+t+' '*r end", "def ii_groups; end", "def nasa_space_craft; end", "def union(lb)\n\n\n\n\n\n end", "def _print_ace(a,b,species)\n ace=String.new\n a.each{|_a|\n next if b.nil? || _a.nil? ||b.size<1\n ace << \"Gene : \\\"#{_a}\\\"\\n\"\n b.each{|_b|\n ace << \"Ortholog #{_b} \\\"#{species}\\\" Inferred_automatically OrthoMCL\\n\"\n }\n ace << \"\\n\"\n }\n return ace\nend", "def _print_ace(a,b,species)\n ace=String.new\n a.each{|_a|\n next if b.nil? || _a.nil? 
||b.size<1\n ace << \"Gene : \\\"#{_a}\\\"\\n\"\n b.each{|_b|\n ace << \"Ortholog #{_b} \\\"#{species}\\\" Inferred_automatically OrthoMCL\\n\"\n }\n ace << \"\\n\"\n }\n return ace\nend", "def test_verse_range_and_separated_verse\n text = 'Ruth 2,1-3.11'\n t1, t2 = text.split(dot)\n assert_parsed_ast_for_text [pass(text: t1, b1: :Ruth, c1: 2, v1: 1, b2: :Ruth, c2: 2, v2: 3), dot, pass(text: t2, b1: :Ruth, c1: 2, v1: 11, b2: :Ruth, c2: 2, v2: 11)], text\n end", "def csr; sparam(5); end", "def subdivisions; end", "def folding_ranges; end", "def add_train(train)\n @trains << train\n end", "def complex_super_heroe\n heroes = [\"iron man\", \"hulk\", \"black widow\", \"thor\", \"captain marvel\"]\nend", "def represent\n # grid_array will be an array of strings that represent our grid in NxN format\n grid_array=[]\n @h.times{|r| grid_array<<@grid[r*@w,@w].split('')*' '}\n grid_array\n end", "def active_section_plane\n end", "def chain_rule\n output = []\n variables.each_with_index do |variable, i|\n j = i - 1\n given = j < 0 ? [] : variables[0..i - 1]\n output << {variable => given}\n end\n output\n end", "def car(make, model)\n \"#{make} #{model}\"\nend", "def car(make, model)\n \"#{make} #{model}\"\nend", "def car(make, model)\n \"#{make} #{model}\"\nend", "def constellation; end", "def category_vector(word)\n raise \"No vectors in *Cosine models\"\n end", "def match_maker(logic, *arrays)\n\t\tnew_arrays =[]\n\t\tarrays.each_slice(2){|a,b| new_arrays << [a,b]}\n\t\tputs new_arrays\n\t\tputs logic\n\tend", "def labels; end", "def ll_table\n\t\ttable = {}\n\t\tnon_terminals.each do |non_term|\n\t\t\ttable[non_term] = {}\n\t\t\tp_sets = predict_sets(non_term)\n\t\t\tp_sets.each_index do |i|\n\t\t\t\tp_sets[i].each do |term|\n\t\t\t\t\ttable[non_term][term] = i\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\treturn table\n\tend", "def k_array!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 60 )\n\n\n\n type = K_ARRAY\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 493:3: 'array'\n match( \"array\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 60 )\n\n\n end", "def variety; end", "def variety; end", "def fill_train(train)\n 120.times { train.board(active_passenger, station) }\n end", "def modeler_description\r\n return \"A and B are the operands corresponding to the selected schedules A and B. The entered formula can use operators + - * and / any float numbers and at least one operand. Brackets are allowed. All Operations are carried minute by minute. 
The division is the only irregular operator; when the enumerator is an operand it's meaning is (schedule value != 0), so the output would be Boolean represented by zeroes and ones\"\r\n end", "def make_tabula_recta\n letters = @letter_set.dup\n table = []\n letters.length.times do\n table.push letters.dup\n letters.push letters.shift\n end\n indices = {}\n letters = letters.each_with_index { |letter, i| indices[letter] = i }\n [table, indices]\n end", "def lineL_stations \n puts \"The list of stations for line L: \"\n puts \"[0] - 8th\"\n puts \"[1] - 6th\"\n puts \"[2] - Union Square\"\n puts \"[3] - 3rd\"\n puts \"[4] - 1st\"\nend", "def addTerm(coeff, constr); end", "def car(company, type)\n company + \" \" + type\nend", "def cars(keys, values)\n\tkeys + values\nend", "def ship_list\n \"The Cruiser is three units long and the Submarine is two units long.\"\n end", "def line_n\n [\"Times Square\", \"34th\", \"28th\", \"23rd\", \"Union Square\", \"8th\"]\nend", "def relabel_comp_cell_tubes\n show do\n title \"Re-label all the competent cell tubes\"\n \n table operations.start_table\n .input_item(\"Parent\", heading: \"Old ID\")\n .output_item(\"Transformation\", heading: \"New ID\", checkable: true)\n .end_table\n end\n end", "def cannabinoid_abbreviation; end", "def prep_tubes(input_str, output_str, cell_lysis_str, method_str) \n y_overnights = operations.select {|op| op.input(input_str).object_type.name == \"Yeast Overnight Suspension\" }\n log_info 'y_overnights', y_overnights.each {|op| op.input(input_str).object_type.name}\n \n # Select which operations have specific extraction parameters in order to account for tubes needed in protocol\n zymo_tubes = operations.select {|op| op.input(cell_lysis_str).val == 'Enzymatic'}\n scrw_caps = operations.select {|op| op.input(input_str).object_type.name == 'Yeast Overnight Suspension'}.select {|op| op.input(cell_lysis_str).val == 'Mechanical'}\n ext_tubes = operations.select {|op| op.input(method_str).val == 'miRNeasy_Kit'}\n rneasy_cols = operations.select {|op| op.input(method_str).val == 'RNeasy_Kit'}\n mi_rneasy_cols = operations.select {|op| op.input(method_str).val == 'miRNeasy_Kit'}\n final_tubes = operations.length\n \n # Gathers and labels tubes for samples \n show do\n title 'Preparing Tubes'\n separator\n note 'Gather the following materials:'\n (!scrw_caps.empty?) ? (note \"<b>#{scrw_caps.length}</b> - 2mL Screw Cap tubes and label <b>#{scrw_caps.map { |op| op.temporary[:tube]}}</b>\" ) : nil\n (!zymo_tubes.empty?) ? (note \"<b>#{zymo_tubes.length}</b> - 1.5mL microfuge tubes and label <b>#{zymo_tubes.map { |op| \"ZD#{op.temporary[:tube]}\"}}</b>\" ) : nil\n (!rneasy_cols.empty?) ? (note \"<b>#{rneasy_cols.length}</b> - RNeasy Kit Columns and label <b>#{rneasy_cols.map {|op| op.temporary[:tube]}}</b>\" ) : nil\n separator\n (!ext_tubes.empty? || !mi_rneasy_cols.empty?) ? (note \"Place the following in a tube rack in the fume hood when ready:\" ): nil\n (!ext_tubes.empty?) ? (note \"<b>#{ext_tubes.length * 2}</b> - 1.5mL RNase Free tubes and label each pair <b>#{ext_tubes.map {|op| op.temporary[:tube]}}</b>\" ) : nil\n (!mi_rneasy_cols.empty?) ? 
(note \"<b>#{mi_rneasy_cols.length}</b> - miRNeasy Kit Columns and label <b>#{mi_rneasy_cols.map {|op| op.temporary[:tube]}}</b>\" ) : nil\n note \"<b>#{final_tubes}</b> - 1.5mL RNase-Free Tubes and use sticker dots to label <b>#{operations.map {|op| op.output(output_str).item.id}}</b>\"\n end\n \n \n \n # Table format of gathering tubes and allows for a visual mapping of which sample will go into what tubes\n headers = [\"Item ID\", \"Zymolase Digest\", \"2mL Screw Cap\", \"1st Extract\", \"2nd Extract\", \"RNeasy Columns\", \"miRNeasy Columns\", \"Final Tube ID\"]\n show do\n title 'Preparing Tubes Table'\n separator\n note \"<b>This table shows the previous slide in a table format</b>\"\n table operations.start_table\n .custom_column(heading: headers[0]) { |op| op.input(input_str).item.id}\n .custom_column(heading: headers[1]) { |op| (op.input(cell_lysis_str).val == 'Enzymatic') ? op.temporary[:tube] : '--'}\n .custom_column(heading: headers[2]) { |op| (op.input(cell_lysis_str).val == 'Mechanical' && op.input(input_str).object_type.name == 'Yeast Overnight Suspension') ? op.temporary[:tube] : '--'}\n .custom_column(heading: headers[3]) { |op| (op.input(method_str).val == 'miRNeasy_Kit') ? op.temporary[:tube] : '--'}\n .custom_column(heading: headers[4]) { |op| (op.input(method_str).val == 'miRNeasy_Kit') ? op.temporary[:tube] : '--' }\n .custom_column(heading: headers[5]) { |op| (op.input(method_str).val == 'RNeasy_Kit') ? op.temporary[:tube] : '--' }\n .custom_column(heading: headers[6]) { |op| (op.input(method_str).val == 'miRNeasy_Kit' ) ? op.temporary[:tube] : '--' }\n .custom_column(heading: headers[7], checkable: true) { |op| op.output(output_str).item.id}\n .end_table\n end\n \n # Gathers falcon tubes for quenching overnight suspension and/or falcon tubes for organic reagents used in miRNeasy method\n (!y_overnights.empty?) ? falcons = y_overnights.map {|o| o.input(input_str).item.id} : falcons = []\n (!mi_rneasy_cols.empty?) ? falcons = falcons.concat(['QIAzol', '100% Ethanol', 'Chloroform']) : falcons\n if (!falcons.empty?) \n show do \n title \"Labeling and Preparing Tubes\"\n separator\n note \"Gather <b>#{falcons.length}</b> 15mL falcon tubes and label:\"\n falcons.each {|i| check \"<b>#{i}</b>\"}\n end\n end\n end", "def proyections(ventas_base, augment, start_array, end_array) \n a = ventas_base.map.with_index do |sales, index|\n if index >= start_array && index <= end_array\n (sales*augment)\n else\n sales\n end\n end\n return a\nend", "def squares\r\n [@a1, @a2, @a3, @b1, @b2, @b3, @c1, @c2, @c3]\r\n \r\nend", "def graph_first_line\n array = [\"exercise\"]\n self.most_reps.times do |n|\n array << \"set #{n+1}\" \n array << 'string'\n end\n array\n end", "def render_subset_extend_interp_spokes(shiftx, shifty, color, ibegin, iend, srep_index)\n\n iend.each_with_index do |p, i|\n#\tif p.size >= 4 and (p[3].is_a? Integer) and p[3] >= 0 and p[3] < 3 \n#\t @app.stroke $sreps[p[3]].color\n if srep_index == 0\n if $subset_index.include? i\n \n\t\tif p.size >=3 and (p[2].is_a? 
Integer) and p[2] >= 0 and p[2] < 3 \n\t\t @app.stroke $sreps[p[2]].color\n\n other_srep_index = p[2]\n other_srep_spoke_index = p[3]\n other_srep_spoke_begin = $sreps[other_srep_index].interpolated_spokes_begin[other_srep_spoke_index]\n @app.line(other_srep_spoke_begin[0]+shiftx, other_srep_spoke_begin[1]+shifty, p[0]+shiftx, p[1]+shifty)\n\t\telse \n\t\t @app.stroke color\n\t\tend\n \n\t\[email protected](ibegin[i][0]+shiftx, ibegin[i][1]+shifty, p[0]+shiftx, p[1]+shifty)\n\t end\n end\n end\n end", "def line_ranges=(_); end", "def aff()\n\t\t\t\tfor q in 1..20\n\t\t\t\t\tputs\n\t\t\t\tend\n\t\t\tprint @plateau[0][0],\" | \", @plateau[0][1],\" | \",@plateau[0][2]\n\t\t\tputs\n\t\t\tputs \"---------\"\n\t\t\tprint @plateau[1][0],\" | \", @plateau[1][1],\" | \",@plateau[1][2]\n\t\t\tputs\n\t\t\tputs \"---------\"\n\t\t\tprint @plateau[2][0],\" | \", @plateau[2][1],\" | \",@plateau[2][2]\n\t\t\tputs\n\tend", "def produce_spaceship5(type: :freighter, size: :m, fuel_tank_vol: 100, engine_count: 4)\r\n #..\r\nend", "def add_section_plane(plane)\n end", "def initialize(${1:args})\n ${1:$\n(mapconcat\n '(lambda (x) (concat \"@\" x \" = \" x))\n (split-string text \", \")\n (concat \"\\n\" (make-string (current-column) 32)))\n}$0\nend", "def active_section_plane\n end", "def active_section_plane\n end", "def upc_a_with_composite_symbology; end", "def\n \nend\n\n\n# 6. sentence_maker refactored solution", "def variables\n @countries = [\"Australia\",\"Canada\",\"France\",\"Mexico\",\"Spain\",\"Norway\",\"Netherlands\",\"United States\"]\n @categories = [\"Art\",\"Music\",\"Film and Video\",\"Tech\",\"Dance\",\"Fashion\",\"Games\",\"Photography\",\"Theather\",\"Food\"]\n end", "def arrival(train)\n @trains << train\n end", "def reduce_begin_with(_production, _range, _tokens, _children)\n begin_anchor\n end", "def math_environments\n %w[align align*\n eqnarray eqnarray* equation equation*\n gather gather* gathered\n multline multline*\n ]\n end", "def neginv(b) return \"@SP\\nA=M-1\\nM=\"+b+\"M\\n\" end", "def formation; end", "def MyTables(tables,b,h,result) \n if tables == 1\n result=(b*h) #Cross sectional area\n elsif tables == 2\n result =((b*h**3)/12) #moment of inertia\n else\n result =((b*h**2)/6) #section modulus\n end #end of calculating result\nend", "def add_word!(a_string_param, an_array_param)\n a_string_param << \" rutabaga\"\n an_array_param << \"rutabaga\"\nend", "def genetic_code_table; 11; end", "def init_baseline\n @predicted = {}\n Oracle.mcrae_raw.each_pair do |category,exemplars|\n @predicted[category] = []\n exemplars.each_pair do |word,freq|\n @predicted[category].push [word,freq]\n end\n @predicted[category] = @predicted[category].sort { |a,b| b[1] <=> a[1] }[0..9].map { |x| x[0] }\n end\n @debug = false\n @cosines = Cosine.new(File.join(@datapath,\"cosine_wordmap.yaml\"),File.join(@datapath,\"cosine_matrix.yaml\"))\n end", "def solr_filter_concepts(prefix = '', join = ' || ')\n result = Item::ITYPE_CONCEPT.map{|e|\"#{prefix}itype_text:#{e}\"}.join(\"#{join}\")\n \"(#{result})\"\n end", "def pp_vector(vector)\n puts vector.map { |y| y.inspect[1...-1].split('').join(' ') + ' , ' }.join[0...-3]\n print_new_line\nend" ]
[ "0.5018968", "0.4945763", "0.4929118", "0.49246505", "0.49246505", "0.48243096", "0.48022875", "0.47756255", "0.47665703", "0.47657916", "0.476206", "0.47393003", "0.47198325", "0.47146022", "0.4691572", "0.4688209", "0.46785837", "0.46510273", "0.46494558", "0.46473074", "0.46461642", "0.46046454", "0.4596224", "0.4591017", "0.45698872", "0.45653546", "0.4552983", "0.45497373", "0.45469043", "0.45448327", "0.45448327", "0.45404115", "0.45387655", "0.45245045", "0.45101747", "0.4509305", "0.45081282", "0.4498993", "0.4490912", "0.4488622", "0.44644934", "0.44635147", "0.4453671", "0.4453671", "0.44535398", "0.4412595", "0.44103336", "0.44073987", "0.44066155", "0.44063583", "0.44025436", "0.4387885", "0.43861014", "0.4383785", "0.4383785", "0.4383785", "0.43835562", "0.43800437", "0.43780807", "0.43762287", "0.4368734", "0.4363939", "0.4361137", "0.4361137", "0.4359354", "0.43547133", "0.435244", "0.43491554", "0.4345731", "0.4343181", "0.43411872", "0.43405473", "0.43372017", "0.43353227", "0.43330026", "0.43323815", "0.4331881", "0.43312126", "0.4326043", "0.4325231", "0.43226", "0.43206805", "0.43197232", "0.43168667", "0.43084553", "0.43082407", "0.43082407", "0.43064898", "0.43030426", "0.43003994", "0.42994326", "0.42980734", "0.4289151", "0.42837778", "0.42806318", "0.42794815", "0.4276672", "0.42659837", "0.42637882", "0.42615828", "0.42593163" ]
0.0
-1
=begin car ship submarine nil =end
def next3 vehicles = ["car", "ship", "plane", "submarine", "train"] countries = ["japan", "us", "germany", "france", "russia"] vehicles.each do |vehicle| countries.each do |country| next if country.include?("j") puts "#{vehicle}, #{country}" end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ship; end", "def sub_sector; end", "def region; end", "def nasa_space_craft; end", "def yeast; end", "def nebula; end", "def starship; end", "def vehicle; end", "def vehicle; end", "def vehicle; end", "def vehicle; end", "def vehicle; end", "def medical_fellowships\n # blank\n end", "def border=(_arg0); end", "def island; end", "def strain; end", "def settle_car(car)\n\t\tcar.outOfRange = true if(car.x<0 || car.x>@right || car.y < 0 || car.y > @top)\n\t\tcar.playground = self\n\tend", "def legionnairs\n\n end", "def super_sector; end", "def constellation; end", "def second_line\n \"#{self.town} #{self.district} #{self.region}\".squeeze(\" \").strip\n end", "def active_section_plane\n end", "def continent; end", "def car_no_puts(make, model)\n make + ' ' + model\nend", "def print_empty_line\n print_in_same_line(\" \")\n end", "def blank?\n ship.nil?\n end", "def area_wrt_ground ()\n \n end", "def active_section_plane\n end", "def active_section_plane\n end", "def celebration; end", "def rassoc(p0) end", "def setup\n # 原子のないセル。\n vectors00 = [[2.0, 2.0, 2.0], [0.0, 2.0, 2.0], [0.0, 0.0, 2.0]]\n axes00 = CrystalCell::LatticeAxes.new([[2.0, 2.0, 2.0], [0.0, 2.0, 2.0], [0.0, 0.0, 2.0]])\n @c00 = CrystalCell::Cell.new(axes00)\n @c00.comment = 'c00'\n\n # 元素の識別子を数字にしたもの。\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 1, [0.1, 0.2, 0.3] ),\n ]\n @c01 = CrystalCell::Cell.new(axes00, atoms)\n @c01.comment = 'c01'\n\n # Li と O を1つずつ入れたセル。\n # @c02 = CrystalCell::Cell.new( [ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0] ] )\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0], 'Li1' ),\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2], 'O1' ),\n ]\n @c02 = CrystalCell::Cell.new(vectors00, atoms)\n @c02.comment = 'c02'\n\n # 原子の順序を逆にしたもの。\n atoms = [\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2] ),\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0] ),\n ]\n @c03 = CrystalCell::Cell.new(vectors00, atoms)\n @c03.comment = 'c03'\n\n # 原子の順序がいりまじったもの\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2] ),\n CrystalCell::Atom.new( 'Li', [0.1, 0.2, 0.3] ),\n ]\n @c04 = CrystalCell::Cell.new(vectors00, atoms)\n @c04.comment = 'c04'\n\n # 原子が不足しているもの。\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0] ),\n ]\n @c05 = CrystalCell::Cell.new(vectors00, atoms)\n @c05.comment = 'c05'\n\n # Selective dynamics をいれたもの。\n atoms = [\n CrystalCell::Atom.new( 'Li', [0.0, 0.0, 0.0], nil, [true, false, false ] ),\n CrystalCell::Atom.new( 'O' , [0.2, 0.2, 0.2] ),\n ]\n @c06 = CrystalCell::Cell.new(vectors00, atoms)\n @c06.comment = 'c06'\n\n # 元素の識別子を数字にしたもの。\n atoms = [\n CrystalCell::Atom.new( 0, Mageo::Vector3DInternal[0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 1, Mageo::Vector3DInternal[0.2, 0.2, 0.2] ),\n ]\n @c07 = CrystalCell::Cell.new(vectors00, atoms)\n @c07.comment = 'c07'\n\n # セル外の座標の原子を追加。\n atoms = [\n CrystalCell::Atom.new( 'Li', [ 1.2, 3.4, 5.6], \"atom0\", [ false, false, true] ),\n CrystalCell::Atom.new( 'O', [-1.2, -3.4, -5.6] ),\n ]\n @c08 = CrystalCell::Cell.new(vectors00, atoms)\n @c08.comment = 'c08'\n\n #cubic\n axes = CrystalCell::LatticeAxes.new(\n [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] )\n ]\n @c10 = CrystalCell::Cell.new( axes, atoms)\n @c10.comment = 'cubic'\n\n #hexagonal\n axes = CrystalCell::LatticeAxes.new(\n [\n [0.86602540378443864676, 0.5, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n 
CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] )\n ]\n @c11 = CrystalCell::Cell.new( axes, atoms)\n @c11.comment = 'hexagonal'\n\n #monoclinic\n axes = CrystalCell::LatticeAxes.new(\n [ [1.5, 1.4, 0.0],\n [0.0, 1.2, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0]),\n #CrystalCell::Atom.new( 0, [0.1, 0.2, 0.3]),\n #CrystalCell::Atom.new( 0, [0.2, 0.3, 0.4]),\n ]\n @c12 = CrystalCell::Cell.new( axes, atoms)\n @c12.comment = 'monoclinic'\n\n #orthorhombic\n axes = CrystalCell::LatticeAxes.new(\n [\n [3.0, 0.0, 0.0],\n [0.0, 2.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0])\n ]\n @c13 = CrystalCell::Cell.new( axes, atoms)\n @c13.comment = 'orthorhombic'\n\n #tetragonal\n axes = CrystalCell::LatticeAxes.new(\n [\n [2.0, 0.0, 0.0],\n [0.0, 2.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0], )\n ]\n @c14 = CrystalCell::Cell.new( axes, atoms)\n @c14.comment = 'tetragonal'\n\n #tetragonal-b\n axes = CrystalCell::LatticeAxes.new(\n [\n [2.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 2.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0], )\n ]\n @c14b = CrystalCell::Cell.new( axes, atoms)\n @c14b.comment = 'tetragonal-b'\n\n #triclinic\n axes = CrystalCell::LatticeAxes.new(\n [\n [1.5, 1.4, 1.3],\n [0.0, 1.2, 1.1],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0] ),\n CrystalCell::Atom.new( 0, [0.1, 0.2, 0.3] ),\n CrystalCell::Atom.new( 0, [0.2, 0.3, 0.4] ),\n ]\n @c15 = CrystalCell::Cell.new( axes, atoms)\n @c15.comment = 'triclinic'\n\n #trigonal\n axes = CrystalCell::LatticeAxes.new(\n [\n [0.86602540378443864676, 0.5, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n atoms = [\n CrystalCell::Atom.new( 0, [0.0, 0.0, 0.0]),\n CrystalCell::Atom.new( 0, [0.333333333333333, 0.333333333333333, 0.333333333333333] ),\n ]\n @c16 = CrystalCell::Cell.new( axes, atoms)\n @c16.comment = 'trigonal'\n end", "def band; end", "def band; end", "def compact_blank; end", "def subdivisions; end", "def summer_olympics_sport; end", "def buscar(x, y)\r\n @buscarx = x\r\n @buscary = y\r\n @libre = false\r\n end", "def suitable_for_none\n \tself.disciplines.delete_all\n end", "def dish; end", "def north\n @north ||= south + SIZE\n end", "def suitable_for_none?\n \treturn self.disciplines.empty?\n end", "def heroine; end", "def sea; end", "def compact_blank!; end", "def p_blankline\n p do\n nbsp\n end\n end", "def plate\n @plate\n end", "def village; end", "def sub_c\n end", "def quarter_wind; end", "def planet; end", "def planet; end", "def planet; end", "def planet; end", "def planet; end", "def planet; end", "def winter_olympics_sport; end", "def vin; end", "def ravel; end", "def mycelial_gristmilling()\n xenopteri_shebang(sectwise_cessor, ungular_pietism)\n end", "def two_empty_in_line(marker)\n initial = Square::INITIAL_MARKER\n check_lines_for_constellation(marker, initial, initial)\n end", "def house; end", "def house; end", "def test_it_denies_valid_placement_if_ship_is_in_any_cells\n # Testing first ship\n @board.place(@cruiser, [\"A1\", \"A2\", \"A3\"])\n assert_equal false, @board.valid_placement?(@submarine, [\"A1\", \"B1\"])\n assert_equal false, @board.valid_placement?(@submarine, [\"A2\", \"B2\"])\n assert_equal false, @board.valid_placement?(@submarine, [\"A3\", \"B3\"])\n\n assert_equal true, @board.valid_placement?(@submarine, [\"B1\", \"B2\"]) # Horizontal\n assert_equal true, @board.valid_placement?(@submarine, [\"B1\", 
\"C1\"]) # Vertical\n end", "def neginv(b) return \"@SP\\nA=M-1\\nM=\"+b+\"M\\n\" end", "def aff()\n\t\t\tprint @plateau[0][0],\"|\", @plateau[0][1],\"|\",@plateau[0][2]\n\t\t\tputs\n\t\t\tputs \"-----\"\n\t\t\tprint @plateau[1][0],\"|\", @plateau[1][1],\"|\",@plateau[1][2]\n\t\t\tputs\n\t\t\tputs \"-----\"\n\t\t\tprint @plateau[2][0],\"|\", @plateau[2][1],\"|\",@plateau[2][2]\n\t\t\tputs\n\tend", "def vehicles; end", "def deco_pos; end", "def careers\n # blank\n end", "def production_curtailment; end", "def villian; end", "def sector; end", "def car_id\n super || rental.car_id\n end", "def industry; end", "def x______________STRUCTURE\r\nend", "def x______________STRUCTURE\r\nend", "def spread(infectious_cell)\n infectious_cell.virus.divide.infect cell north of infectious_cell\n infectious_cell.virus.divide.infect cell south of infectious_cell\n infectious_cell.virus.divide.infect cell east of infectious_cell\n infectious_cell.virus.divide.infect cell west of infectious_cell\n end", "def available_squares\n self.squares.select{|square| square == ' '}\n end", "def stairway\n puts '''\n ___\n ___|\n ___|\n ___|\n ___|\n ___|\n ___|\n ___|\n |\n '''\n end", "def masculine_name; end", "def carl\n @carl\n end", "def half_wind; end", "def isp; end", "def isp; end", "def star; end", "def monopoly\n\tmonopoly = {:railroads => {}}\n\nend", "def cure\n @all_ships.each(&:cure)\n end", "def bizet; end", "def droid; end", "def unusual_sport; end", "def -(mat)\n end", "def disclosure_of_interests\n # blank\n end", "def planets; end", "def variety; end", "def variety; end", "def species; end", "def spacing; 0; end", "def line_ranges=(_); end", "def p15\n\t\nend", "def rect; end", "def rect; end" ]
[ "0.5593012", "0.5591009", "0.5480554", "0.5415526", "0.54060555", "0.53649586", "0.5258123", "0.5249705", "0.5249705", "0.5249705", "0.5249705", "0.5249705", "0.5229712", "0.5196641", "0.51964045", "0.5189839", "0.51775247", "0.5146525", "0.51414627", "0.5137114", "0.5118467", "0.5103385", "0.5072543", "0.5050235", "0.50497824", "0.50301045", "0.5027664", "0.50264347", "0.50264347", "0.4995064", "0.49808514", "0.49631912", "0.49404123", "0.49404123", "0.49358544", "0.4934294", "0.4932684", "0.49317843", "0.49247566", "0.4921865", "0.48891342", "0.4870889", "0.4868846", "0.48514313", "0.48504657", "0.48486403", "0.48485738", "0.48478013", "0.48451272", "0.484286", "0.4837876", "0.4837876", "0.4837876", "0.4837876", "0.4837876", "0.48376775", "0.483593", "0.48224637", "0.48167506", "0.4810733", "0.4784807", "0.47831196", "0.47831196", "0.47767234", "0.4759867", "0.47441378", "0.4742005", "0.4741395", "0.47383797", "0.47340697", "0.47298896", "0.4728106", "0.4721154", "0.47121903", "0.47001073", "0.47001073", "0.46961334", "0.46909264", "0.46889666", "0.46807468", "0.46753675", "0.4672246", "0.4670731", "0.4670731", "0.46696094", "0.4667999", "0.46672514", "0.4664976", "0.4664933", "0.466329", "0.466291", "0.4662462", "0.4658429", "0.4655458", "0.4655458", "0.46521997", "0.46507218", "0.4642859", "0.4640175", "0.4639626", "0.4639626" ]
0.0
-1
Use callbacks to share common setup or constraints between actions.
def set_checklist_item @checklist_item = ChecklistItem.find(params[:id]) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_required_actions\n # TODO: check what fields change to asign required fields\n end", "def action_hook; end", "def run_actions; end", "def define_action_hook; end", "def actions; end", "def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end", "def add_actions; end", "def callbacks; end", "def callbacks; end", "def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end", "def define_action_helpers; end", "def post_setup\n end", "def action_methods; end", "def action_methods; end", "def action_methods; end", "def before_setup; end", "def action_run\n end", "def execute(setup)\n @action.call(setup)\n end", "def define_action_helpers?; end", "def set_actions\n actions :all\n end", "def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end", "def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end", "def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end", "def before_actions(*logic)\n self.before_actions = logic\n end", "def setup_handler\n end", "def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end", "def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? 
}\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end", "def action; end", "def action; end", "def action; end", "def action; end", "def action; end", "def workflow\n end", "def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end", "def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end", "def before(action)\n invoke_callbacks *self.class.send(action).before\n end", "def process_action(...)\n send_action(...)\n end", "def before_dispatch(env); end", "def after_actions(*logic)\n self.after_actions = logic\n end", "def setup\n # override and do something appropriate\n end", "def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end", "def setup(_context)\n end", "def setup(resources) ; end", "def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end", "def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end", "def determine_valid_action\n\n end", "def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end", "def startcompany(action)\n @done = true\n action.setup\n end", "def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end", "def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end", "def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end", "def define_tasks\n define_weave_task\n connect_common_tasks\n end", "def setup(&block)\n define_method(:setup, &block)\n end", "def setup\n transition_to(:setup)\n end", "def setup\n transition_to(:setup)\n end", "def action\n end", "def setup( *args 
)\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend", "def config(action, *args); end", "def setup\n @setup_proc.call(self) if @setup_proc\n end", "def before_action \n end", "def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end", "def action\n end", "def matt_custom_action_begin(label); end", "def setup\n # override this if needed\n end", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end", "def after(action)\n invoke_callbacks *options_for(action).after\n end", "def pre_task\n end", "def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end", "def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end", "def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end", "def setup_signals; end", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! 
: action\n end", "def initialize(*args)\n super\n @action = :set\nend", "def after_set_callback; end", "def setup\n #implement in subclass;\n end", "def lookup_action; end", "def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end", "def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end", "def release_actions; end", "def around_hooks; end", "def save_action; end", "def setup(easy)\n super\n easy.customrequest = @verb\n end", "def action_target()\n \n end", "def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end", "def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end", "def before_setup\n # do nothing by default\n end", "def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end", "def default_action; end", "def setup(&blk)\n @setup_block = blk\n end", "def callback_phase\n super\n end", "def advice\n end", "def _handle_action_missing(*args); end", "def duas1(action)\n action.call\n action.call\nend", "def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end", "def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end", "def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend" ]
[ "0.6163163", "0.6045976", "0.5946146", "0.591683", "0.5890051", "0.58349305", "0.5776858", "0.5703237", "0.5703237", "0.5652805", "0.5621621", "0.54210985", "0.5411113", "0.5411113", "0.5411113", "0.5391541", "0.53794575", "0.5357573", "0.53402257", "0.53394014", "0.53321576", "0.53124547", "0.529654", "0.5296262", "0.52952296", "0.52600986", "0.52442724", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.5232394", "0.523231", "0.5227454", "0.52226824", "0.52201617", "0.5212327", "0.52079266", "0.52050185", "0.51754695", "0.51726824", "0.51710224", "0.5166172", "0.5159343", "0.51578903", "0.51522785", "0.5152022", "0.51518047", "0.51456624", "0.51398855", "0.5133759", "0.5112076", "0.5111866", "0.5111866", "0.5110294", "0.5106169", "0.509231", "0.50873137", "0.5081088", "0.508059", "0.50677156", "0.50562143", "0.5050554", "0.50474834", "0.50474834", "0.5036181", "0.5026331", "0.5022976", "0.5015441", "0.50121695", "0.5000944", "0.5000019", "0.4996878", "0.4989888", "0.4989888", "0.49864885", "0.49797225", "0.49785787", "0.4976161", "0.49683493", "0.4965126", "0.4958034", "0.49559742", "0.4954353", "0.49535993", "0.4952725", "0.49467874", "0.49423352", "0.49325448", "0.49282882", "0.49269363", "0.49269104", "0.49252945", "0.4923091", "0.49194667", "0.49174926", "0.49173003", "0.49171105", "0.4915879", "0.49155936" ]
0.0
-1
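A minimal sketch of how a finder like the `set_checklist_item` in this record's `document` field is typically shared across actions, assuming a conventional Rails controller. Only the private method and its body come from the record; the controller name, action list, and `before_action` wiring are illustrative assumptions.

class ChecklistItemsController < ApplicationController
  # Run the shared lookup before every member action (assumed action names).
  before_action :set_checklist_item, only: [:show, :edit, :update, :destroy]

  def show
    # @checklist_item was already loaded by the callback; no per-action lookup.
  end

  private

  # Use callbacks to share common setup or constraints between actions.
  def set_checklist_item
    @checklist_item = ChecklistItem.find(params[:id])
  end
end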
Never trust parameters from the scary internet, only allow the white list through.
def checklist_item_params params.require(:checklist_item).permit( :description, :checked ) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_params\n params.require(:user).permit(param_whitelist)\n end", "def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end", "def allow_params_authentication!; end", "def allowed_params\n ALLOWED_PARAMS\n end", "def default_param_whitelist\n [\"mode\"]\n end", "def param_whitelist\n [:role, :title]\n end", "def expected_permitted_parameter_names; end", "def safe_params\n params.except(:host, :port, :protocol).permit!\n end", "def strong_params\n params.require(:team_member).permit(param_whitelist)\n end", "def permitir_parametros\n \t\tparams.permit!\n \tend", "def strong_params\n params.require(:community).permit(param_whitelist)\n end", "def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end", "def strong_params\n params.require(:education).permit(param_whitelist)\n end", "def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end", "def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end", "def param_whitelist\n [:rating, :review]\n end", "def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end", "def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end", "def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end", "def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end", "def valid_params_request?; end", "def strong_params\n params.require(:experience).permit(param_whitelist)\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? 
key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end", "def allowed_params\n params.require(:allowed).permit(:email)\n end", "def permitted_params\n []\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end", "def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend", "def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end", "def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end", "def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end", "def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end", "def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end", "def safe_params\n params.require(:user).permit(:name)\n end", "def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend", "def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end", "def check_params; true; end", "def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end", "def quote_params\n params.permit!\n end", "def valid_params?; end", "def paramunold_params\n params.require(:paramunold).permit!\n end", "def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend", "def filtered_parameters; end", "def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end", "def filtering_params\n params.permit(:email, :name)\n end", "def check_params\n true\n end", "def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end", "def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def allowed_params\n 
params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend", "def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend", "def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end", "def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end", "def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end", "def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend", "def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end", "def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end", "def active_code_params\n params[:active_code].permit\n end", "def filtering_params\n params.permit(:email)\n end", "def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end", "def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end", "def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end", "def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end", "def post_params\n if current_user.admin? 
\n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end", "def list_params\n params.permit(:name)\n end", "def filter_parameters; end", "def filter_parameters; end", "def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end", "def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end", "def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end", "def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end", "def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end", "def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end", "def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end", "def url_whitelist; end", "def admin_social_network_params\n params.require(:social_network).permit!\n end", "def filter_params\n params.require(:filters).permit(:letters)\n end", "def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end", "def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end", "def sensitive_params=(params)\n @sensitive_params = params\n end", "def permit_request_params\n params.permit(:address)\n end", "def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end", "def secure_params\n params.require(:location).permit(:name)\n end", "def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end", "def question_params\n params.require(:survey_question).permit(question_whitelist)\n end", "def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end", "def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end", "def maintenance_request_params\n params[:maintenance_request].permit! 
#allow all parameters for now\n end", "def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end", "def url_params\n params[:url].permit(:full)\n end", "def backend_user_params\n params.permit!\n end", "def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend", "def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end", "def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end", "def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end", "def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end", "def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end", "def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end", "def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end", "def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end" ]
[ "0.69792545", "0.6781151", "0.67419964", "0.674013", "0.6734356", "0.6591046", "0.6502396", "0.6496313", "0.6480641", "0.6477825", "0.64565", "0.6438387", "0.63791263", "0.63740575", "0.6364131", "0.63192815", "0.62991166", "0.62978333", "0.6292148", "0.6290449", "0.6290076", "0.62894756", "0.6283177", "0.6242471", "0.62382483", "0.6217549", "0.6214457", "0.6209053", "0.6193042", "0.6177802", "0.6174604", "0.61714715", "0.6161512", "0.6151757", "0.6150663", "0.61461", "0.61213595", "0.611406", "0.6106206", "0.6105114", "0.6089039", "0.6081015", "0.6071004", "0.60620916", "0.6019971", "0.601788", "0.6011056", "0.6010898", "0.6005122", "0.6005122", "0.6001556", "0.6001049", "0.59943926", "0.5992201", "0.59909594", "0.5990628", "0.5980841", "0.59669393", "0.59589154", "0.5958826", "0.5957911", "0.5957385", "0.5953072", "0.59526145", "0.5943361", "0.59386164", "0.59375334", "0.59375334", "0.5933856", "0.59292704", "0.59254247", "0.5924164", "0.59167904", "0.59088355", "0.5907542", "0.59064597", "0.5906243", "0.5898226", "0.589687", "0.5896091", "0.5894501", "0.5894289", "0.5891739", "0.58860534", "0.5882406", "0.587974", "0.58738774", "0.5869024", "0.58679986", "0.5867561", "0.5865932", "0.5864461", "0.58639693", "0.58617616", "0.5861436", "0.5860451", "0.58602303", "0.5854586", "0.58537364", "0.5850427", "0.5850199" ]
0.0
-1
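A short sketch of how a strong-parameters whitelist like the `checklist_item_params` in this record guards mass assignment, assuming conventional `create` semantics. Everything except the `require`/`permit` call is an illustrative assumption, not part of the record.

def create
  # Only :description and :checked survive the whitelist below; any other
  # key posted from the outside is dropped before mass assignment.
  @checklist_item = ChecklistItem.new(checklist_item_params)
  if @checklist_item.save
    redirect_to @checklist_item
  else
    render :new
  end
end

private

# Never trust parameters from the scary internet, only allow the white list through.
def checklist_item_params
  params.require(:checklist_item).permit(:description, :checked)
end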
clear cache of ECMA262 elements
def clear_cache @lit_cache = {} @lit_nextpos = {} end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_cache() @cache = {}; end", "def clear_cache; end", "def clear_cache\n @all = nil\n end", "def clear!\n @cache = {}\n end", "def cache_clear\n @moneta.clear\n end", "def clear_cache\n @results = nil\n @result_ids = nil\n @num_results = nil\n @letters = nil\n end", "def clear\n cache.clear\n end", "def clear\n @cache = {}\n end", "def clear\n @cache.clear\n end", "def clear_cache\n @cache = {}\n end", "def invalidate_cache!\n elements.each do |element|\n element.invalidate_cache!\n end\n\n self\n end", "def clear_cache!\n @cache = {}\n end", "def clear_cache!\n @cache = {}\n end", "def clear_all!\n @cache = Cache.new\n end", "def clear_cached_js\n self.map_fns.map! { nil }\n self.save\n end", "def clear\r\n @cache.flush\r\n end", "def clear_cache(create_new_object = T.unsafe(nil)); end", "def user_clear (element)\r\n begin\r\n key_processor(element)\r\n ****_clear(@selector, @locator)\r\n rescue Exception => e\r\n raise e.message\r\n raise e.backtrace.inspect\r\n end\r\n end", "def clear_caches\n self.log.debug \"Clearing entry and values caches.\"\n\t\t@entry = nil\n\t\[email protected]\n\tend", "def clear_cache!\n @sorted = false\n end", "def invalidate_tag_cache!\n @tags_for_node_cache = nil\n @tags_cache = nil\n @tags_type_cache = nil\n end", "def clear_cache\n ccs.each(&:clear_cache)\n end", "def reset\n @entry_cache.clear\n end", "def cache(count)\n @cached_elements = active_elements.pop(count)\n return\n end", "def clear() end", "def clear() end", "def clear_cache! *keys\n clear_cache *keys\n ipcm_trigger :clear_cache, *keys\n end", "def clear_cache_like! *keys\n clear_cache_like *keys\n ipcm_trigger :clear_cache_like, *keys\n end", "def clear_cache!\n # this should be overridden by concrete adapters\n end", "def clear; end", "def clear; end", "def clear; end", "def clear; end", "def clear; end", "def clear; end", "def clear; end", "def clear\n @cache.clear\n entries.clear\n self\n end", "def cache_clear\n @store.clear\n end", "def clear_cached_values\n @chocoversion = nil\n @compiled_choco = nil\n end", "def clear!; end", "def clear!; end", "def clear!; end", "def clear_cached_vars\n @_rendered , @_versions = false , false\n end", "def clear\n keys.each do |k|\n Jeapie.instance_variable_set(\"@#{k}\", nil)\n end\n end", "def clear\n @next = nil\n @size = 0\n @stored = {}\n nil\n end", "def free\n cache.clear\n nil\n end", "def clear\n\n set_id_to_cache_key_map\n\n cache_response = fetch\n @id_to_cache_key_map.each do |id, key|\n Memcache.delete(key)\n Memcache.delete(\"ca_sa_shared_de_sa_cs_#{cache_response[id][:api_key]}\")\n end\n\n nil\n\n end", "def delete_all_caching_without_touching_additives\n\t\t\tself.delete_category_menu_fragments\n \t\tself.delete_cache\n \t\tself.delete_shared_item_items\n\t\t\tself.delete_category_browser_fragments\nend", "def clear_cache\n property_cache.clear\n end", "def reset\r\n @cache.reset\r\n end", "def reset!\n @cache = nil\n end", "def cache_clear\n DrgCms.cache_clear(:dc_page)\nend", "def clear\n\n set_id_to_cache_key_map\n\n @id_to_cache_key_map.each do |_, keys|\n Memcache.delete(keys[:kit])\n Memcache.delete_from_all_instances(keys[:saas])\n end\n\n nil\n\n end", "def cache_clear\n @store.delete\n end", "def rehash\n @rules.rehash\n @elements = nil\n end", "def cache_clear\n @store.flush_all\n rescue ::MemCache::MemCacheError => e\n Log.error(e)\n nil\n end", "def clear\n @cache.clear\n self\n end", "def cache_clear\n @dataset.delete\n end", "def cache_clear\n @dataset.delete\n end", "def delete_duplicates(el)\n location = 
@cache.index(el)\n @cache.delete_at(location) if location\n end", "def clear_checksum_cache!\n @lookup_checksums = {}\n end", "def clear ; @data.clear ; end", "def clear\n return unless @list\n @list.clear\n @native_text.clear if @native_text # check this line, should it be removed 2014-08-27 - 20:54\n fire_dimension_changed :clear\n init_vars\n end", "def flush_memos\n CACHE.clear\n end", "def clear_compilation_cache\n {\n method: \"Page.clearCompilationCache\"\n }\n end", "def invalidate_sorted_cache\n\t\tmod_sorted = nil\n\t\tmod_ranked = nil\n\tend", "def clear\n set_hash.each { |regexp, hash| hash.clear }\n end", "def clear\n @ary.clear\n @heapsize = 0\n @mode = nil\n end", "def cache_clear\n @client.flushall\n end", "def clear_cache\n @copyright_status = nil\n end", "def clear\n hashed.clear\n list.clear\n end", "def clear_buffer_cache!\n @buffers = nil\n end", "def _clear_cache\n @cache_parent.clear\n end", "def clear\n raise \"not implemented\"\n end", "def clear_all_caches\n $CACHE.clear\n remove_cached_feeds\n remove_cached_list_of_taxon_concepts\n if ActionController::Base.cache_store.class == ActiveSupport::Cache::MemCacheStore\n ActionController::Base.cache_store.clear\n return true\n else\n return false\n end\n end", "def clear() \n @obj.clear() \n end", "def flush!\n @_cache = {}\n end", "def clear_compiler *keys\n keys.size == 0 ?\n compiler_pool.clear :\n keys.each do |key|\n compiler_pool.keys.each { |k| k.first == key && compiler_pool.delete(k) }\n end\nend", "def flush_cache; end", "def clear!(klass)\n @cache.delete(klass)\n end", "def clear\n @hash_tags.clear\n end", "def update_cache\r\n Rails.cache.delete(\"All#{self.class.name.to_s}\")\r\n end", "def clear\n end", "def clear\n end", "def cache_clear\n DrgCms.cache_clear(:dc_category)\nend", "def remove_from_cache\n redis.hdel 'identifiers', self.typed_id\n redis.srem 'identifier:' + item.typed_id, self.typed_id\n end", "def clear\n @hash.del\n @index.del\n end", "def clear_cache\n @access.refresh\n end", "def clear_cache\n @access.refresh\n end", "def clear\n assert_exists\n @element.clear\n end", "def clear\n @known = []\n end", "def cache; end", "def cache; end", "def cache; end", "def cache; end", "def cache; end", "def cache; end", "def cache; end", "def cleanup_cache\n\t\t\tputs \"Cleaning up cache\"\n\t\t\texpire_older_than = ((Time.now.to_f - MiniProfiler::EXPIRE_TIMER_CACHE) * 1000).to_i\n\t\t\t@timer_struct_lock.synchronize {\n\t\t\t\t@timer_struct_cache.delete_if { |k, v| v['Root']['StartMilliseconds'] < expire_older_than }\n\t\t\t}\n\t\tend", "def clear\n @a.clear\n end" ]
[ "0.7347165", "0.7269472", "0.7045574", "0.7045122", "0.694833", "0.69387525", "0.69051886", "0.6892014", "0.68281543", "0.6808932", "0.6758927", "0.6749398", "0.6749398", "0.6698103", "0.66908604", "0.6682596", "0.66582125", "0.6645091", "0.66368103", "0.66311175", "0.662571", "0.66114587", "0.6598892", "0.65525365", "0.65366596", "0.65366596", "0.6534859", "0.6527347", "0.6516532", "0.6480251", "0.6480251", "0.6480251", "0.6480251", "0.6480251", "0.6480251", "0.6480251", "0.646398", "0.64617485", "0.64268184", "0.63881063", "0.63881063", "0.63881063", "0.6360746", "0.63392377", "0.62919176", "0.6288928", "0.6275581", "0.62732923", "0.6273237", "0.6270429", "0.6250027", "0.6246138", "0.62429494", "0.62428933", "0.6220287", "0.6205984", "0.6201945", "0.61930895", "0.61930895", "0.6178976", "0.61716163", "0.61524165", "0.6126548", "0.6120184", "0.61169016", "0.6106639", "0.6106055", "0.61028284", "0.6095429", "0.6077319", "0.6059657", "0.6049003", "0.6039435", "0.60261947", "0.6005065", "0.60027313", "0.60003096", "0.5997367", "0.59966713", "0.59925336", "0.5984547", "0.59820294", "0.59524643", "0.59524643", "0.5949363", "0.59469587", "0.59456503", "0.5944044", "0.5944044", "0.5937251", "0.5933734", "0.59295124", "0.59295124", "0.59295124", "0.59295124", "0.59295124", "0.59295124", "0.59295124", "0.59272516", "0.5925369" ]
0.7439159
0
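The two hashes reset by this record's `clear_cache` are the position-keyed memo used by the lexer's `next_input_element` in the next record: `@lit_cache` maps a source offset to the element lexed at that offset, and `@lit_nextpos` maps it to the offset just past that element, so re-lexing from a previously visited position becomes a constant-time lookup. A distilled sketch of that pattern follows; `lex_one_element` is a hypothetical stand-in for the real scanning code.

def cached_next_element
  if (elem = @lit_cache[@pos])    # already lexed from this offset?
    @pos = @lit_nextpos[@pos]     # jump straight past it
    return elem
  end
  pos0 = @pos
  elem = lex_one_element          # hypothetical: scans one element, advances @pos
  @lit_cache[pos0]   = elem       # memoize by the start offset...
  @lit_nextpos[pos0] = @pos       # ...and remember where it ended
  elem
end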
Fetch next literal and forward position.
def next_input_element(hint) if ret = @lit_cache[@pos] @pos = @lit_nextpos[@pos] @head_pos = @pos return ret end pos0 = @pos # # skip white space here, because ECMA262(5.1.2) says: # # Simple white space and single-line comments are discarded and # do not appear in the stream of input elements for the # syntactic grammar. # while white_space or single_line_comment end ret = line_terminator || multi_line_comment || token if ret @lit_cache[pos0] = ret @lit_nextpos[pos0] = @pos @head_pos = @pos return ret end if @codes[@pos].nil? return nil end if hint.nil? if @codes[@pos] == 0x2f ECMA262::LIT_DIV_OR_REGEXP_LITERAL else nil end elsif hint == :div ret = div_punctuator if ret @lit_cache[pos0] = ret @lit_nextpos[pos0] = @pos end @head_pos = @pos return ret elsif hint == :regexp ret = regexp_literal if ret @lit_cache[pos0] = ret @lit_nextpos[pos0] = @pos end @head_pos = @pos return ret else if @codes[@pos] == 0x2f ECMA262::LIT_DIV_OR_REGEXP_LITERAL else nil end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def next\n ret = peek_next\n @str.slice! @last_re if ret.type != :eos\n\n ret\n end", "def next\n token = next_token\n token = next_token while token&.empty?\n token\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? || token.kind_of?(String) \n @pointer += 1\n token\n end", "def next_token; end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- 
action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def next()\n return \" \" unless has_next()\n if(@count <= 0)\n @char = @compressed_string[@i]\n @i += 1\n @count = get_count()\n end\n @count -= 1\n return @char\n end", "def next!() end", "def next\n\t\tif @next_token\n\t\t\ttoken = @next_token\n\t\t\t@next_token = nil\n\t\t\treturn token\n\t\telse\n\t\t\ttoken = read_token\n\t\t\treturn token\n\t\tend\n\tend", "def next()\n @index += 1\n @string[@index...(@index+1)]\n end", "def next_token\n tokens.shift\n end", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def next() end", "def next() end", "def next_token; @stack.shift; end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = 
nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, 
tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = @adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n 
@state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def next_token\n @tokens.shift\n end", "def next\n peek.tap { @position += 1 }\n end", "def next(pointer); end", "def next(pointer); end", "def next_cursor\n @result[:next_cursor]\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def fetch!\n @last_instruction = @code[@instruction_pointer,3]\n @instruction_pointer += 3\n @last_instruction\n end", "def next()\n if has_next()\n @strings[0][1]-=1\n c = @strings[0][0]\n while has_next() and @strings[0][1] == 0\n @strings.shift\n end\n return c\n end\n return \" \"\n end", "def literal(buffer)\n reader = lambda { |string = ''|\n buffer.major_mode.read(1) do |event|\n if unicode = event.unicode\n string += unicode # copy\n buffer.message string.inspect\n\n case result = literal_handle(buffer, string)\n when nil\n reader.call(string)\n when String\n literal_insert(buffer, result)\n end\n else\n return # Unverrichteter Dinge\n end\n end\n }\n\n reader.call\n end", "def next\n displacement = @file.gets.try(:chomp).try(:to_f)\n return nil unless displacement\n\n ret = @curr_val\n @curr_val += displacement\n ret\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return 
Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def push_literal\n <<-CODE\n next_int;\n t1 = cpu_current_literals(state, c);\n t2 = fast_fetch(t1, _int);\n stack_push(t2);\n CODE\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def next_word\n return unless md = get.match(FORWARD_WORD, cursor)\n self.cursor = md.offset(0).last\n end", "def next\n block.instructions[index+1] || (block.next ? block.next.instructions.first : nil)\n end", "def next_token\n\t\[email protected]_token\n\tend", "def next\n if @next.is_a? TokenSource\n @next = @next.next\n return @next \n end\n @next\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when text = ss.scan(/#{DIGIT}/) then\n action { [:DIGIT, text.to_i] }\n when text = ss.scan(/#{ADDITION}/) then\n action { [:ADDITION, text] }\n when text = ss.scan(/#{SUBSTRACTION}/) then\n action { [:SUBSTRACTION, text] }\n when text = ss.scan(/#{MULTIPLICATION}/) then\n action { [:MULTIPLICATION, text] }\n when text = ss.scan(/#{DIVISION}/) then\n action { [:DIVISION, text] }\n when text = ss.scan(/#{OPENING_PARANTHESIS}/) then\n action { [:OPENING_PARANTHESIS, text] }\n when text = ss.scan(/#{CLOSING_PARANTHESIS}/) then\n action { [:CLOSING_PARANTHESIS, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n if @state == :start && @scanner.eos?\n return nil\n else\n scan_next_token\n end\n end", "def next\n at(position + 1)\n end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def next_char\n self.cursor += 1\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def next_token\n @current_token = @lexer.next_token\n end", "def process_lit(exp)\n # TODO: audit against obfuscator\n value = exp.shift\n case value\n when Integer then\n return \"LONG2NUM(#{value})\"\n when Float then\n return \"rb_float_new(#{value})\"\n when Symbol\n return \"ID2SYM(rb_intern(#{value.to_s.inspect}))\"\n when Range\n f = process_lit [ value.first ]\n l = process_lit [ value.last ]\n x = 0\n x = 1 if value.exclude_end?\n\n return \"rb_range_new(#{f}, #{l}, #{x})\"\n when Regexp\n src = value.source\n return \"rb_reg_new(#{src.inspect}, #{src.size}, #{value.options})\"\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n return nil\n end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def next_match char\n data = get_content\n row = focussed_index + 1\n row.upto(data.length-1) do |ix|\n val = data[ix].chomp rescue return # 2010-01-05 15:28 crashed on trueclass\n #if val[0,1] == char #and val != currval\n if val[0,1].casecmp(char) == 0 #AND VAL != CURRval\n return ix\n end\n end\n row = focussed_index - 1\n 0.upto(row) do |ix|\n val = data[ix].chomp\n #if val[0,1] == char #and val != currval\n if val[0,1].casecmp(char) == 0 #and val != currval\n return ix\n end\n end\n return -1\n end", "def peek\n @tokens[@position]\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. 
-1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n @stmt.step\n end", "def get_next_object\n tmp = ''\n while @base_str[@cursor] == ' '\n @cursor += 1 # skip whitespaces\n end\n\n string_detected = false\n case @base_str[cursor]\n when '\"'\n @cursor += 1\n string_detected = true\n\n when '{'\n return process_hash\n\n when '['\n return procecss_array\n\n end\n\n # check for empty value\n if string_detected && @base_str[@cursor] == '\"'\n @cursor += 1\n return ''\n end\n\n b_continue = true\n while b_continue\n char = @base_str[@cursor]\n if char == '\\\\'\n escaped_char = @base_str[@cursor + 1]\n case escaped_char\n when '\"'\n tmp << \"\\\"\"\n when 'a'\n tmp << \"\\a\"\n when 'b'\n tmp << \"\\b\"\n when 'r'\n tmp << \"\\r\"\n when 'n'\n tmp << \"\\n\"\n when 's'\n tmp << \"\\s\"\n when 't'\n tmp << \"\\t\"\n else # for single \\\n tmp << char\n @cursor -= 1 # compensate shifting below\n end\n # tmp << @base_str[@cursor + 1]\n @cursor += 2\n else\n tmp << char\n @cursor += 1\n end\n\n b_continue = if string_detected\n @base_str[@cursor] != '\"'\n else\n @base_str[@cursor] != ' ' &&\n @base_str[@cursor] != '}' &&\n @base_str[@cursor] != ']' &&\n @base_str[@cursor] != ','\n end\n end\n\n @cursor += 1 if string_detected # skip end quotes\n\n # puts \"found obj: '#{tmp}'\"\n unless string_detected\n tmp = tmp != 'null' ? 
eval(tmp) : nil\n end\n tmp\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def next_token\n\t\tif (token = @tokens.shift) != nil\n\t\t\t@copy << token\n\t\t\treturn token.get_token\n\t\telse\n\t\t\treturn nil\n\t\tend\n\tend", "def next( insn )\n # TODO\n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def literal; end", "def next\n @next && @next.value\n end", "def next\r\n next_values[0]\r\n end", "def peek\n @tokens[@pos]\n end", "def next()\n result = current\n @index += 1\n @got_next_element = false\n @next_element = nil\n result\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def get_next_entry; end", "def advance\n @lookahead = next_token()\n end", "def next_token\n return [false, false] if @src.eos?\n# p @src.rest if @yydebug\n if ret = @src.scan(EM_OPEN_RE)\n @pre << ret\n [:EM_OPEN, ret]\n elsif ret = @src.scan(EM_CLOSE_RE)\n @pre << ret\n [:EM_CLOSE, ret]\n elsif ret = @src.scan(CODE_OPEN_RE)\n @pre << ret\n [:CODE_OPEN, ret]\n elsif ret = @src.scan(CODE_CLOSE_RE)\n @pre << ret\n [:CODE_CLOSE, ret]\n elsif ret = @src.scan(VAR_OPEN_RE)\n @pre << ret\n [:VAR_OPEN, ret]\n elsif ret = @src.scan(VAR_CLOSE_RE)\n @pre << ret\n [:VAR_CLOSE, ret]\n elsif ret = @src.scan(KBD_OPEN_RE)\n @pre << ret\n [:KBD_OPEN, ret]\n elsif ret = @src.scan(KBD_CLOSE_RE)\n @pre << ret\n [:KBD_CLOSE, ret]\n elsif ret = @src.scan(INDEX_OPEN_RE)\n @pre << ret\n [:INDEX_OPEN, ret]\n elsif ret = @src.scan(INDEX_CLOSE_RE)\n @pre << ret\n [:INDEX_CLOSE, ret]\n elsif ret = @src.scan(REF_OPEN_RE)\n @pre << ret\n [:REF_OPEN, ret]\n elsif ret = @src.scan(REF_CLOSE_RE)\n @pre << ret\n [:REF_CLOSE, ret]\n elsif ret = 
@src.scan(FOOTNOTE_OPEN_RE)\n @pre << ret\n [:FOOTNOTE_OPEN, ret]\n elsif ret = @src.scan(FOOTNOTE_CLOSE_RE)\n @pre << ret\n [:FOOTNOTE_CLOSE, ret]\n elsif ret = @src.scan(VERB_OPEN_RE)\n @pre << ret\n [:VERB_OPEN, ret]\n elsif ret = @src.scan(VERB_CLOSE_RE)\n @pre << ret\n [:VERB_CLOSE, ret]\n elsif ret = @src.scan(BAR_RE)\n @pre << ret\n [:BAR, ret]\n elsif ret = @src.scan(QUOTE_RE)\n @pre << ret\n [:QUOTE, ret]\n elsif ret = @src.scan(SLASH_RE)\n @pre << ret\n [:SLASH, ret]\n elsif ret = @src.scan(BACK_SLASH_RE)\n @pre << ret\n [:BACK_SLASH, ret]\n elsif ret = @src.scan(URL_RE)\n @pre << ret\n [:URL, ret]\n elsif ret = @src.scan(OTHER_RE)\n @pre << ret\n [:OTHER, ret]\n else\n ret = @src.rest\n @pre << ret\n @src.terminate\n [:OTHER, ret]\n end\nend", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? || @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? 
|| !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def peek\n @tokens.at(@current)\n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def next(backreferences)\n\t\tif @fragment_spec[:backreference]\n\t\t\tif @index < 0\n\t\t\t\tb = @fragment_spec[:backreference]\n\t\t\t\t@current = backreferences[b - 1] # could be nil\n\t\t\telse\n\t\t\t\t@current = nil\n\t\t\tend\n\t\t\t@index += 1\n\t\telse\n\t\t\tif @index > @max_index\n\t\t\t\t@current = @@NO_MORE\n\t\t\telse\n\t\t\t\t@index += 1\n\t\t\t\t@current = @all[@index]\n\t\t\tend\n\t\tend\n\n\t\treturn @current\n\tend", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def next\n next? ? @current + 1 : nil\n end", "def peek\n @tok ||= read_token\n end", "def next\n\n if (@local_iterator && @local_iterator.has_next?)\n @local_iterator.get_next\n else\n nil\n end\n\n end", "def next\n self.offset(1)\n end", "def next_token\n \n # Early return if there is nothing to be read. This means we've reached the end of the file.\n \n unless @file[@pos]\n return nil\n end\n \n # This is the token that will be returned.\n token = Compiler::Token.new\n \n # Initializes a new instance of the automaton.\n automaton = Automaton.new\n \n # Will be set inside the loop, if necessary.\n increment_next = false\n \n # Will be set inside the loop. Marks whether we've reached the end of the file.\n eof = false\n \n # Build a new token while we don't have a new word yet and isn't in the failed state\n while ((automaton.state != :A || automaton.word.empty?) 
&& automaton.state != :failed)\n \n # The next input for the automaton\n char = @file[@pos]\n \n if char\n \n # Moves the pointer to the next char\n @pos += 1\n \n automaton.transition(char)\n \n # While the automaton hasn't started to build a new word yet, increments the line and column numbers.\n # In this phase, we're just skipping blank characters\n if automaton.word.empty?\n if increment_next\n if char == \"\\n\"\n increment_next = true\n else\n increment_next = false\n end\n @line += 1\n @column = 0\n elsif char == \"\\n\"\n @column += 1\n increment_next = true\n else\n @column += 1\n end\n end\n \n else\n eof = true\n puts \"breaking\"\n break\n end\n end\n \n \n \n if eof\n automaton.transition(\"\\n\")\n else\n @pos -= 1\n end\n \n if (automaton.type == :identifier) && (Compiler.reserved_words.is_reserved?(automaton.word))\n token.type = :reserved_word\n else\n token.type = automaton.type\n end\n \n token.value = automaton.word\n token.line = @line\n token.column = @column\n \n return token\n \n end", "def peek_next()\n return nil if @at_end\n\n begin\n @reader.peek\n rescue StopIteration\n nil\n end\n end", "def next\n raise IOError.new(\"Stream is at the end of file.\") if eof?\n end_of_token = false\n token = \"\"\n while not end_of_token\n c = @file.getc\n puts \"next c: #{c.inspect} v: #{valid_char?(c)} s: #{single_char?(c)} e: #{is_end_character?(c)}\" if @debug\n if eof? then\n end_of_token = true\n elsif (single_char?(c)) then\n if (token.empty?) then\n token = c\n next_token = @file.getc\n if ('#' == token and '#' == next_token) then\n token << next_token\n else\n @file.seek(-1, IO::SEEK_CUR)\n end\n else\n @file.seek(-1, IO::SEEK_CUR)\n end\n end_of_token = true\n elsif (valid_char?(c)) then\n token << c\n elsif is_end_character?(c) then\n move_till_next_token\n end_of_token = (not token.empty?)\n end\n end\n puts \"next\" if @debug\n build_token(token)\n end", "def set_literal\n <<-CODE\n next_int;\n tuple_put(state, cpu_current_literals(state, c), _int, stack_top());\n CODE\n end", "def consume\n @current = @tokens[@pos]\n @pos += 1 if @current\n @current\n end", "def racc_read_token(t, tok, val); end", "def match(ptr, depth = 0)\n case c = ptr.peek(1)\n when '\"', '`'\n start_pos = ptr.pos\n ptr.pos += 1\n AST.new(:string, value: ptr.scan_until(/#{c}/).chop,\n attributes: { type: char_to_type(c) },\n pos: start_pos)\n end\n end", "def next_token\n @state = 1\n value = \"\"\n recovery_data = [0, 0]\n\n while [email protected]?\n char = @stream.read(1)\n next_state = get_next_state(char)\n\n # Move to the next state.\n if next_state\n if recognizable?\n recovery_data = [@state, 0]\n end\n\n value << char\n recovery_data[1] += 1\n @state = next_state\n else\n # Recognise the final token.\n if recognizable?\n @stream.seek(@stream.pos - 1)\n break\n else\n # Recoverable error.\n if recovery_data[0] > 0\n value = recover_from_error!(recovery_data, value)\n break\n # Fatal lexical error.\n else\n raise Bolverk::ASM::LexicalError, \"Disallowed token: #{char} on line #{@stream.line_number}\"\n end\n end\n end\n end\n\n build_token(value)\n end", "def read_next_token(token_class)\n if @next_token\n return @next_token\n else\n # check for a match on the specified class first\n if match(token_class)\n return @next_token\n else\n # now check all the tokens for a match\n Taxonifi::Splitter::Tokens.send(@token_list).each {|t|\n return @next_token if match(t)\n }\n end\n # no match, either end of string or lex-error\n if @input != ''\n raise(Taxonifi::Splitter::SplitterError, \"Lexer 
Error, unknown token at |#{@input[0..20]}...\", caller)\n else\n return nil\n end\n end\n end", "def next_char\n @current_char_pos += 1\n @current_char = @string[@current_char_pos, 1]\n end", "def next_token\r\n if [email protected]?\r\n\t\t\t@token = @token.following\r\n\t\t\treturn @token.value\r\n\t\tend\r\n\r\n\t\tscan(@token)\r\n\t\treturn @token.value\r\n end", "def consume()\n la(1)\n return @lookahead.shift\n end", "def peek_token(n = 0)\n raise ArgumentError.new(\"can't look back in the token stream\") if n < 0\n @enum[@pointer + n]\n end", "def get_char\n @look = @expression[@number]\n @number +=1\nend", "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n @pos = pos0\n lit\n end" ]
[ "0.6730959", "0.64939874", "0.62412304", "0.6231001", "0.6211954", "0.6209109", "0.6181599", "0.6154133", "0.6126161", "0.61195827", "0.6113218", "0.60787266", "0.6060403", "0.605908", "0.59823674", "0.5961887", "0.59591913", "0.59438837", "0.5924103", "0.59197617", "0.591921", "0.591921", "0.58952343", "0.5878258", "0.5874845", "0.5808844", "0.5795805", "0.5795805", "0.57910377", "0.57650435", "0.5745763", "0.57457286", "0.57191944", "0.5719191", "0.56972533", "0.5696059", "0.5689126", "0.56696075", "0.5666355", "0.5662465", "0.5640493", "0.5634174", "0.56275177", "0.56255716", "0.560788", "0.55821615", "0.55818605", "0.5569368", "0.5557107", "0.5557107", "0.5557107", "0.5538685", "0.5519883", "0.5518041", "0.5516868", "0.55108756", "0.5486272", "0.5469479", "0.5467198", "0.5463845", "0.5431624", "0.54245067", "0.54213965", "0.54213965", "0.54213965", "0.54213965", "0.54113", "0.5411026", "0.54103833", "0.5408622", "0.54039645", "0.5403409", "0.539834", "0.5397395", "0.5396555", "0.53961647", "0.5393746", "0.5389894", "0.5381127", "0.53722566", "0.5371636", "0.5366786", "0.5361142", "0.5354396", "0.5344844", "0.53416824", "0.5336974", "0.5326351", "0.531589", "0.529773", "0.52962863", "0.52940035", "0.5293696", "0.5288437", "0.5284045", "0.5283664", "0.5279979", "0.5277505", "0.52677727", "0.526631" ]
0.5853007
25
Tests whether the next literal is white space. If it is, returns an ECMA262::WhiteSpace object and advances the lexical parser position; otherwise returns nil and leaves the position unchanged. Even if the next literal is a sequence of two or more white-space characters, this method returns only a single white space.
def white_space
  if white_space?(@codes[@pos])
    begin
      @pos += 1
    end until !white_space?(@codes[@pos])
    return ECMA262::WhiteSpace.get
  else
    nil
  end
end
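A minimal, self-contained sketch of how this scanning step behaves. The TinyLexer harness and its WHITESPACE code-point set are assumptions made for illustration only; the real ECMA262::WhiteSpace singleton and the lexer class this method belongs to are not shown in this record.

# Hypothetical harness mirroring the @codes/@pos convention used above.
class TinyLexer
  WHITESPACE = [0x09, 0x0B, 0x0C, 0x20, 0xA0].freeze # TAB VT FF SP NBSP

  attr_reader :pos

  def initialize(source)
    @codes = source.codepoints # source as an array of code points
    @pos = 0                   # cursor into @codes
  end

  def white_space?(code)
    WHITESPACE.include?(code)  # include?(nil) is false at end of input
  end

  # Same shape as the method above: consume the whole run, emit one token.
  def white_space
    return nil unless white_space?(@codes[@pos])
    @pos += 1 while white_space?(@codes[@pos])
    :whitespace # stand-in for ECMA262::WhiteSpace.get
  end
end

lexer = TinyLexer.new("   var x")
p lexer.white_space # => :whitespace (one token for the three spaces)
p lexer.pos         # => 3           (cursor now at "v")
p lexer.white_space # => nil         (position unchanged)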
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nextWhite()\r\n str = \"\"\r\n while /\\s/.match?(@c)\r\n str += @c\r\n nextCh()\r\n end\r\n \r\n return Token.new(Token::WHITESPACE, str)\r\n end", "def parse_whitespace\n @lexer.next! while @lexer.get and @lexer.get.type == :whitespace\n true\n end", "def peek_no_space\n return @tokens.first unless @tokens.first.class == TkSPACE\n @tokens[1]\n end", "def whitespace_token\n return if !(match = @chunk.match(WHITESPACE)) || (@chunk[0] == \"\\n\")\n prev = @tokens[-1]\n prev.send(match ? :spaced= : :new_line=, true) if prev\n match ? match[0].size : 0\n end", "def skip_whitespace\n self.advance while self.current == \" \"\n end", "def skip_white_space_or_to_eoln\r\n while (next_char = @source.get)\r\n return next_char if (next_char > ' ') || @source.eoln?\r\n end\r\n end", "def skip_whitespace()\n current = @marker.character\n (current = read_next()) while current == ' ' || current == ?\\t || current == ?\\r\n end", "def whitespace_token\n if md=WHITESPACE.match(@chunk)\n input = md.to_a[0]\n input.length\n else\n did_not_match\n end\n end", "def whitespace_token\n return nil unless md=WHITESPACE.match(@chunk)\n input = md.to_a[0]\n input.length\n end", "def skip_spaces\n ws = { ?\\s => true, ?\\n => true, ?\\r => true, ?\\t => true }\n while r = self.getc\n unless ws[r] then\n self.ungetc(r)\n break\n end\n end\n nil\n end", "def parse_whitespace\n match_regexp(/[ \\t]/)\n end", "def _ast_sp\n while true # kleene\n\n begin # choice\n _tmp = match_string(\" \")\n break if _tmp\n _tmp = match_string(\"\\t\")\n end while false # end choice\n\n break unless _tmp\n end\n _tmp = true # end kleene\n set_failed_rule :_ast_sp unless _tmp\n return _tmp\n end", "def skip_white_spaces\n while char == \" \" || char == \"\\n\" do\n go_next\n end\n end", "def _space\n\n begin # choice\n _tmp = match_string(\" \")\n break if _tmp\n _tmp = match_string(\"\\t\")\n break if _tmp\n _tmp = apply(:_eol)\n end while false # end choice\n\n set_failed_rule :_space unless _tmp\n return _tmp\n end", "def _space\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save\n _tmp = apply(:_eol)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_space unless _tmp\n return _tmp\n end", "def _linear_white_space\n _save = self.pos\n\n _save1 = self.pos\n while true # sequence\n _save2 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save2\n end\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n if _tmp\n while true\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save4\n end\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_linear_white_space unless _tmp\n return _tmp\n end", "def _wsp\n while true\n\n _save1 = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save1\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save1\n _tmp = match_string(\"\\n\")\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n set_failed_rule 
:_wsp unless _tmp\n return _tmp\n end", "def _space\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\\\f\")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\\\v\")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\\\t\")\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_space unless _tmp\n return _tmp\n end", "def _SPACE\n _tmp = match_string(\" \")\n set_failed_rule :_SPACE unless _tmp\n return _tmp\n end", "def get_leading_whitespace_from_text(text)\n leading_whitespace = \"\"\n text_copy = text * 1\n i = 0\n while text_copy[i] != nil do\n if \" \\t\"[text_copy[i]] == nil\n if i > 0\n leading_whitespace = text[0..i-1]\n end\n break\n end\n \n i += 1\n end\n \n leading_whitespace\n end", "def scan_whitespace\n\t\t\tappend_scan(/[\\t\\x20]+/, :Spacing, nil)\n\t\tend", "def peek_no_space; end", "def _Space\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\\\t\")\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_Space unless _tmp\n return _tmp\n end", "def __skip_contiguous_whitespace(index)\n lim = self.__size\n while(index < lim)\n char = self.__ordAt(index)\n return index unless char <= 32 and __is_whitespace(char) # \\t \\n etc. are less than space which is 32\n index += 1\n end\n return index\n end", "def reduce_whitespace(_production, _range, _tokens, _children)\n char_shorthand('s')\n end", "def scan_for_space(token); end", "def _spaces\n _save = self.pos # repetition\n _count = 0\n while true\n\n begin # choice\n _tmp = apply(:_space)\n break if _tmp\n _tmp = apply(:_comment)\n end while false # end choice\n\n break unless _tmp\n _count += 1\n end\n _tmp = _count >= 1\n unless _tmp\n self.pos = _save\n end # end repetition\n set_failed_rule :_spaces unless _tmp\n return _tmp\n end", "def test_whitespace1\n token, value, rest = @c.lex(\" hello there \")\n assert_equal(:identifier, token)\n assert_equal('hello', value)\n assert_equal(' there ', rest)\n end", "def test_whitespace2\n token, value, rest = @c.lex(\" \\n hello there \")\n assert_equal(:identifier, token)\n assert_equal('hello', value)\n assert_equal(' there ', rest)\n end", "def _space\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n return _tmp\n end", "def _sp\n while true\n\n _save1 = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save1\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n set_failed_rule :_sp unless _tmp\n return _tmp\n end", "def remove_whitespace_before(index, buffer, rewriter, remove_preceeding_newline)\n end_pos = index\n begin_pos = end_pos - 1\n begin_pos -= 1 while code[begin_pos] =~ /\\s/ && code[begin_pos] != \"\\n\"\n begin_pos -= 1 if code[begin_pos] == \"\\n\"\n begin_pos -= 1 if code[begin_pos] == \"\\n\" && remove_preceeding_newline\n return if begin_pos.next == end_pos\n rewriter.remove Parser::Source::Range.new(buffer, begin_pos.next, end_pos)\n end", "def reduce_no_whitespace(_production, _range, _tokens, _children)\n char_shorthand('S')\n end", "def skip_space\n @str.slice!(/\\A[ \\t]+/); # Not 
\\s, we want to capture EOL characters\n @str.empty? ? :eos : :ok\n end", "def charIsWhitespace\n c = getChar\n return (c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\")\n end", "def _ws\n while true\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_space)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n set_failed_rule :_ws unless _tmp\n return _tmp\n end", "def next_token(str, start_pos)\n look_at = str[start_pos..-1]\n next_nonblank_pos = start_pos + (look_at =~ /\\S/ || 0)\n next_blank_pos =\n if next_match = str[next_nonblank_pos..-1] =~ /\\s/\n next_nonblank_pos + next_match\n else\n str.size\n end\n return [next_blank_pos, str[next_nonblank_pos...next_blank_pos]]\n end", "def _spaces\n _save = self.pos\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_space)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n if _tmp\n while true\n \n _save2 = self.pos\n while true # choice\n _tmp = apply(:_space)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save2\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_spaces unless _tmp\n return _tmp\n end", "def isWhiteSpace(code)\n return true if (code >= 0x2000 && code <= 0x200A)\n case code\n when 0x09, # \\t\n 0x0A, # \\n\n 0x0B, # \\v\n 0x0C, # \\f\n 0x0D, # \\r\n 0x20,\n 0xA0,\n 0x1680,\n 0x202F,\n 0x205F,\n 0x3000\n return true\n end\n return false\n end", "def consume_whitespace(input)\n while /\\s/ =~ input.look_ahead do\n input.consume\n end\n end", "def nil_if_whitespace\n\n\t\tNilIfWhitespace_Helper_.string_nil_if_whitespace_array_ self\n\tend", "def single_line_comment\n # //\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2f\n @pos += 2\n pos0 = @pos\n while (code = @codes[@pos]) and !line_terminator?(code)\n @pos += 1\n end\n return ECMA262::SingleLineComment.new(@codes[pos0...@pos].pack(\"U*\"))\n else\n nil\n end\n end", "def whitespace_indents\n @whitespace_indents ||= contents.lines.\n map { |line| line.match(/^( *|\\t*)\\S|/)[1] }.\n reject(&:nil?)\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def is_white(c)\n # c =~ /\\s/\n [\"\\t\", \" \"].include?(c)\nend", "def space!\r\n # -> uncomment the next line to manually enable rule tracing\r\n # trace_in( __method__, 3 )\r\n\r\n type = SPACE\r\n channel = ANTLR3::DEFAULT_CHANNEL\r\n\r\n \r\n # - - - - main rule block - - - -\r\n # at line 12:9: ( ' ' )+\r\n # at file 12:9: ( ' ' )+\r\n match_count_2 = 0\r\n while true\r\n alt_2 = 2\r\n look_2_0 = @input.peek( 1 )\r\n\r\n if ( look_2_0 == 0x20 )\r\n alt_2 = 1\r\n\r\n end\r\n case alt_2\r\n when 1\r\n # at line 12:9: ' '\r\n match( 0x20 )\r\n\r\n else\r\n match_count_2 > 0 and break\r\n eee = EarlyExit(2)\r\n\r\n\r\n raise eee\r\n end\r\n match_count_2 += 1\r\n end\r\n\r\n # --> action\r\n channel = HIDDEN\r\n # <-- action\r\n\r\n \r\n @state.type = type\r\n @state.channel = channel\r\n\r\n ensure\r\n # -> uncomment the next line to manually enable rule tracing\r\n # trace_out( __method__, 3 )\r\n\r\n end", "def next_token\n return 
@extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def next_word\n start = @optr\n len = @txt.length\n\n while @optr < len && @txt[@optr].char != \" \"\n @optr += 1\n end\n\n while @optr < len && @txt[@optr].char == \" \"\n @optr += 1\n end\n\n @txt[start...@optr]\n end", "def is_space(input)\n input[0] == \" \"\nend", "def next()\n if has_next()\n @strings[0][1]-=1\n c = @strings[0][0]\n while has_next() and @strings[0][1] == 0\n @strings.shift\n end\n return c\n end\n return \" \"\n end", "def space()\n # pocitanie riadkov\n self.line.push position if /\\n/ === data[position]\n /\\s/ === data[position]\n\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def parse_spc\n @spc = @row[/(^[ \\t]+)/,1].to_s #sometime result is nil\n @ron = @row.strip.size\n end", "def is_space(char)\n char == \" \" ? 
1 : 0\nend", "def whitespace!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 45 )\n\n\n\n type = WHITESPACE\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 66:14: ( '\\\\t' | '\\\\f' | '\\\\n' | '\\\\r' | ' ' | '\\\\u00A0' )+\n # at file 66:14: ( '\\\\t' | '\\\\f' | '\\\\n' | '\\\\r' | ' ' | '\\\\u00A0' )+\n match_count_9 = 0\n while true\n alt_9 = 2\n look_9_0 = @input.peek( 1 )\n\n if ( look_9_0.between?( 0x9, 0xa ) || look_9_0.between?( 0xc, 0xd ) || look_9_0 == 0x20 || look_9_0 == 0xa0 )\n alt_9 = 1\n\n end\n case alt_9\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x9, 0xa ) || @input.peek( 1 ).between?( 0xc, 0xd ) || @input.peek(1) == 0x20 || @input.peek(1) == 0xa0\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_9 > 0 and break\n eee = EarlyExit(9)\n\n\n raise eee\n end\n match_count_9 += 1\n end\n\n\n\n # --> action\n channel=HIDDEN; \n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 45 )\n\n\n end", "def _LWSP_char\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_SPACE)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_HTAB)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_LWSP_char unless _tmp\n return _tmp\n end", "def left(spaces = T.unsafe(nil)); end", "def left(spaces = T.unsafe(nil)); end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def get_token\n\t\tt = Token.new\n\t\tcase @src[@lineno][@linepos]\n\t\t\twhen ' ' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\f' then #less likely to see this\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\t' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\v' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '0'..'9' then\n\t\t\t\tt = parse_number\n\t\t\twhen 'A-Z' then\n\t\t\t\tt = parse_name\n\t\t\twhen 'a-z' then\n\t\t\t\tparse_name\n\t\t\twhen '_' then\n\t\t\t\tt = parse_name\n\t\t\twhen /[~!$%\\^&*()-+=|{}\\[\\]\\:;\\/?<>,.]/ then #very much check\n\t\t\t\tt = parse_operator\n\t\t\twhen '\"' then\n\t\t\t\tt = parse_string\n\t\tend\n\tend", "def skip_space!\n if !do_skip_space!\n return false\n end\n while do_skip_space!; end\n true\n end", "def find_minimum_indent\n self.lines.map { |s| s.index(/[^\\s]/) unless s.empty? }.compact.min\n end", "def _nil\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"nil\")\n unless _tmp\n self.pos = _save\n break\n end\n _save1 = self.pos\n _tmp = apply(:_utfw)\n _tmp = _tmp ? 
nil : true\n self.pos = _save1\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_nil unless _tmp\n return _tmp\n end", "def _sig_wsp\n _save = self.pos\n\n _save1 = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save1\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save1\n _tmp = match_string(\"\\n\")\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n if _tmp\n while true\n\n _save2 = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save2\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save2\n _tmp = match_string(\"\\n\")\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save2\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_sig_wsp unless _tmp\n return _tmp\n end", "def get_word\n return if @scanner.eos?\n\n # advance until first char difference from space\n if @scanner.match?(/\\s/)\n @scanner.scan_until(/\\S/)\n @scanner.pointer -= 1\n end\n\n if QUOTES_STRING.include? @scanner.peek(1)\n # read until next quote\n match = @scanner.scan(/(?:\"(?<val>[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\"))/)\n\n # remove last quote and unscape them\n match = match.strip[1..-2].gsub('\\\"', '\"') unless match.nil?\n else\n match = @scanner.scan_until(/\\s/)\n if match.nil?\n match = @scanner.scan_until(/$/)\n end\n\n match.strip! unless match.nil?\n end\n\n # Advanced until next word\n @scanner.scan(/\\s+/)\n\n match\n end", "def skip_space; end", "def scan_single_char()\n\t\tresult = if metachar?(lexeme)\n\t\t\t# A Meta-character...\n\t\t\tenqueue_token(lexeme.dup)\n\t\telse\n\t\t\tenqueue_token(:T_CHARLIT)\n\t\tend\n\t\t\n\t\treturn result\n\tend", "def whitespace\n @tag_data[:whitespace]\n end", "def token\n ready_token\n\n i = @buffer.index(/[\\[\\]()<>{}\\s\\/]/) || @buffer.size\n\n token_chars =\n if i == 0 and @buffer[i,2] == \"<<\" then 2\n elsif i == 0 and @buffer[i,2] == \">>\" then 2\n elsif i == 0 then 1\n else i\n end\n\n strip_space = !(i == 0 and @buffer[0,1] == '(')\n tok = head(token_chars, strip_space)\n\n if tok == \"\"\n nil\n elsif tok[0,1] == \"%\"\n @buffer = \"\"\n token\n else\n tok\n end\n end", "def next_token(w)\n\tif w[0] != '('\n\t\treturn w.slice!(0)\n\tend\n\n\tw.slice!(0) #remove that leading '('\n\n\ttoken = \"\"\n\twhile true\n\t\tc = w.slice!(0)\n\t\tif c == ')'\n\t\t\tbreak\n\t\tend\n\n\t\ttoken << c\n\tend\n\n\treturn token\nend", "def skipSpaces(pos)\n max = @src.length\n while pos < max\n ch = charCodeAt(@src, pos)\n break if !isSpace(ch)\n pos += 1\n end\n return pos\n end", "def skip_whitespace\n while @char =~ /[\\s,;#]/\n # Comments begin with a semicolon and extend to the end of the line\n # Treat #! 
as a comment for shebang lines\n if @char == ';' || (@char == '#' && peek_char == '!')\n while @char && @char != \"\\n\"\n next_char\n end\n elsif @char == '#'\n break unless peek_char == '_'\n next_char; next_char # skip #_\n skip_whitespace\n incomplete_error \"Unexpected end of program after #_, expected a form\" unless @char\n parse_form # discard next form\n else\n next_char\n end\n end\n end", "def find_whitespace_index(characters)\n n = characters.length\n n.times do |i|\n if characters[i] == \" \"\n return i\n end\n end\n return n\nend", "def consume\n return nil if @s.eos?\n\n @s.mark\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string('\"')\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?\n create_token(:hash,\n :type => start_identifier? ? :id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string(\"'\")\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n # NON-STANDARD: IE * hack\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n elsif start_identifier?(char + @s.peek(2))\n @s.reconsume\n consume_ident\n elsif @s.peek(2) == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :/\n if @s.peek == '*'\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n text = @s.consume_rest\n end\n\n if @options[:preserve_comments]\n create_token(:comment, :value => text)\n else\n consume\n end\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?(char + @s.peek)\n @s.reconsume\n consume_ident\n else\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n 
create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def whitespace\n @input = @input.gsub(/\\ +/, ' ').strip\n end", "def single_space_before(string)\n return false if string[-2] != ' '\n return false if string[-3] == ' '\n true\n end", "def space!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 4 )\n\n type = SPACE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:5: WS\n ws!\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 4 )\n\n end", "def minimum_leading_whitespace\n whitespace = split(\"\\n\", -1).inject(0) do |indent, line|\n if line.strip.empty?\n indent # Ignore completely blank lines.\n elsif line =~ /^(\\s+)/\n (1.0 / $1.length) > indent ? 1.0 / $1.length : indent\n else\n 1.0\n end\n end\n\n whitespace == 1.0 ? 0 : (1.0 / whitespace).to_i\n end", "def consume\n return nil if @s.eos?\n\n @s.mark\n\n # Consume comments.\n if comment_token = consume_comments\n if @options[:preserve_comments]\n return comment_token\n else\n return consume\n end\n end\n\n # Consume whitespace.\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?(@s.peek(2))\n create_token(:hash,\n :type => start_identifier?(@s.peek(3)) ? :id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n # Non-standard: Preserve the IE * hack.\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n nextTwoChars = @s.peek(2)\n nextThreeChars = char + nextTwoChars\n\n if start_number?(nextThreeChars)\n @s.reconsume\n consume_numeric\n elsif nextTwoChars == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n elsif start_identifier?(nextThreeChars)\n @s.reconsume\n consume_ident\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?(@s.peek(3))\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?\n @s.reconsume\n consume_ident\n else\n # Parse error.\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n 
create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def strip_whitespace\n code.gsub!(WHITESPACE_REGEX, ' ')\n\n code\n end", "def token_terminator\n %r{(?:(?=[[:space:]])|$)}\n end", "def suppress_newlines\n @tokens.pop if value[0] == ?\\\n end", "def preserve_whitespace?\n mode_is_code current_mode\n end", "def line_terminator\n if line_terminator?(@codes[@pos])\n begin\n @pos += 1\n end until !line_terminator?(@codes[@pos])\n return ECMA262::LineTerminator.get\n else\n nil\n end\n end", "def consume_trailing_spaces # :nodoc:\n skip_tkspace_without_nl\n end", "def first_word\r\n text =~ /^.*?\\S.*?/\r\n $&\r\n end", "def verify_only_whitespace_is_present(xn)\n verify_text_matches_regex(xn, /\\A[ \\n]*\\z/, 'contained non-whitespace')\n end", "def read_character\n lit = read_literal\n\n return \" \" if lit.empty? && peek_char == \" \"\n CHARACTERS.fetch(lit.downcase) do\n # Return just the first character\n unread(lit[1..-1])\n lit[0,1]\n end\n end", "def skip_tkspace_without_nl\n tokens = []\n\n while (tk = get_tk) and :on_sp == tk[:kind] do\n tokens.push(tk)\n end\n\n unget_tk(tk)\n tokens\n end", "def balance_space?\n inside_empty_brackets?\n end", "def empty_statement(var_env)\n a = peek_lit(nil)\n if a == ECMA262::PUNC_SEMICOLON\n fwd_after_peek\n ECMA262::StEmpty.new\n else\n nil\n end\n end", "def skip_white\n while is_white(@look)\n get_char\n end\nend", "def skip_space(ix, direction)\n diff = { :left => -1, :right => 1 }[direction]\n ix += diff until ix < 1 || char_at(ix) =~ /[^ \\t]/ || char_at(ix).blank?\n ix\n end", "def _bs\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\n\")\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n return _tmp\n end", "def test_parser_handles_whitespace_no_text_simple_content\n text = 'Simple Content'\n\n simple_content_assert ' ', ' '\n simple_content_assert \" #{text}\", \" #{text}\"\n simple_content_assert \"#{text} \", \"#{text} \"\n simple_content_assert \" #{text} \", \" #{text} \"\n end", "def peekChar\r\n\t\tcheckLine\r\n\t\tcheckSpace\r\n\t\treturn @Line[0]\r\n\tend", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end" ]
[ "0.7012271", "0.6961337", "0.68803036", "0.680081", "0.6706844", "0.6389385", "0.6382983", "0.6251076", "0.62392366", "0.62300813", "0.6194158", "0.61813813", "0.61427003", "0.6138353", "0.60783905", "0.60439825", "0.59633714", "0.592747", "0.5923118", "0.58975196", "0.5889778", "0.58896714", "0.58750296", "0.58595246", "0.5789315", "0.57785577", "0.57552236", "0.5753388", "0.5723767", "0.57226944", "0.5704261", "0.5658365", "0.5645996", "0.56248724", "0.56081706", "0.55933404", "0.5519622", "0.5511031", "0.54978144", "0.54921526", "0.5480703", "0.5462112", "0.5401714", "0.537999", "0.5360938", "0.53404045", "0.533859", "0.5333665", "0.5330287", "0.53083324", "0.52726316", "0.5270034", "0.52695835", "0.5262079", "0.5250597", "0.5242772", "0.5230424", "0.52129835", "0.5191115", "0.5181771", "0.5181771", "0.51721257", "0.51631194", "0.51393855", "0.51379794", "0.5130857", "0.51242876", "0.5119522", "0.51128525", "0.51093966", "0.5103199", "0.51002866", "0.5086039", "0.50820553", "0.50632334", "0.5059854", "0.50584537", "0.5044902", "0.50443846", "0.50369537", "0.50326836", "0.5025706", "0.5013519", "0.50130224", "0.50061977", "0.50042117", "0.49952757", "0.49739185", "0.49688843", "0.49658448", "0.49554297", "0.4952754", "0.49497962", "0.49445784", "0.494193", "0.49410155", "0.4938621", "0.49336085", "0.49124852", "0.49098006" ]
0.8135252
0
Tests whether the next literal is a LineTerminator. If it is, returns an ECMA262::LineTerminator object and advances the lexical parser position; otherwise returns nil and the position is not changed. Even if the next literal is a sequence of two or more line terminators, this method returns only one line terminator.
def line_terminator if line_terminator?(@codes[@pos]) begin @pos += 1 end until !line_terminator?(@codes[@pos]) return ECMA262::LineTerminator.get else nil end end
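For reference, a minimal standalone sketch of the consume-one-terminator contract documented above. MiniLexer, its LT_CODES table, and the LineTerminator stand-in singleton are illustrative assumptions, not part of the library; the real lexer keeps the source as Unicode codepoints in @codes and returns ECMA262::LineTerminator.get.

class MiniLexer
  LT_CODES = [0x0A, 0x0D, 0x2028, 0x2029].freeze # LF, CR, LS, PS per ECMA-262

  LineTerminator = Object.new # hypothetical stand-in for the ECMA262 singleton

  def initialize(source)
    @codes = source.codepoints
    @pos = 0
  end

  def line_terminator?(code)
    code && LT_CODES.include?(code)
  end

  # Consumes a whole run of terminator codepoints but yields a single
  # token, matching the contract documented in the query string above.
  def line_terminator
    return nil unless line_terminator?(@codes[@pos])
    @pos += 1 while line_terminator?(@codes[@pos])
    LineTerminator
  end

  attr_reader :pos
end

lexer = MiniLexer.new("\r\n\r\n\nvar x;")
p lexer.line_terminator # => the LineTerminator singleton (one token for 5 codepoints)
p lexer.pos             # => 5, parser position moved past the whole run
p lexer.line_terminator # => nil, and pos stays at 5

Pre-checking the first codepoint before entering the consuming loop is what guarantees the nil-and-unchanged-position half of the contract.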
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newline_token(offset)\n @tokens.pop while value == ';'\n\n # 1. function prototype\n if tag == ':'\n indent_token(@indent+2)\n outdent_token(2)\n\n # 2. prevent doubles terminators\n # 3. prevent terminator after indent\n # 4. prevent starting with a term on an empty file\n elsif ![:TERM, :INDENT].include?(tag) && !tokens.empty?\n token(:TERM, \"\\n\", offset, 0)\n end\n end", "def _CRLF\n\n _save = self.pos\n while true # sequence\n _tmp = apply(:_CR)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_LF)\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_CRLF unless _tmp\n return _tmp\n end", "def eat_eol()\n if eol_as_token # if eol is significant in the language...\n position = build_position(:lexeme)\n eol_lexeme = scanner.scan(eol_pattern) # Consume the eol text\n eol_token = [:T_EOL, RaccLexer::Token.new(eol_lexeme, eol_lexeme, position)]\n queue.unshift eol_token\n else\n scanner.scan(eol_pattern) # Consume the eol text\n end\n\n @lineno += 1\n @line_offset = scanner.pos()\n end", "def single_line_comment\n # //\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2f\n @pos += 2\n pos0 = @pos\n while (code = @codes[@pos]) and !line_terminator?(code)\n @pos += 1\n end\n return ECMA262::SingleLineComment.new(@codes[pos0...@pos].pack(\"U*\"))\n else\n nil\n end\n end", "def parse_line_break; end", "def skip_white_space_or_to_eoln\r\n while (next_char = @source.get)\r\n return next_char if (next_char > ' ') || @source.eoln?\r\n end\r\n end", "def next_line\n return nil if @input.eof?\n line, ch, @pos, @line_no = '', '', 0, @line_no + 1\n until ch == \"\\r\" || ch == \"\\n\" || ch.nil?\n ch = @input.getc\n line += ch unless ch.nil?\n end\n if ch == \"\\r\"\n ch = @input.getc\n @input.ungetc(ch) unless ch == \"\\n\" || ch.nil?\n end\n line.chomp << \"\\n\"\n end", "def terminator\n @terminator ||= whodunnit\n end", "def newline?\n @kind == :newline\n end", "def eol_pattern()\n return /\\r\\n?|\\n/ # Applicable to *nix, Mac, Windows eol conventions\n end", "def skip_to_eoln\r\n @source.get until @source.eoln?\r\n true\r\n end", "def token_terminator\n %r{(?:(?=[[:space:]])|$)}\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 39 )\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 138:6: ( '\\\\r' )? 
'\\\\n'\n # at line 138:6: ( '\\\\r' )?\n alt_2 = 2\n look_2_0 = @input.peek( 1 )\n\n if ( look_2_0 == 0xd )\n alt_2 = 1\n end\n case alt_2\n when 1\n # at line 138:6: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 39 )\n\n end", "def parse_newline\n s0 = @scanner.pos\n s1 = []\n s2 = parse_whitespace\n while s2 != :failed\n s1 << s2\n s2 = parse_whitespace\n end\n s2 = match_str(\"\\n\")\n if s2 == :failed\n s2 = @scanner.pos\n s3 = match_str(\"\\r\")\n s2 = if s3 == :failed\n @scanner.pos = s2\n :failed\n else\n s4 = match_str(\"\\n\")\n s4 = nil if s4 == :failed\n [s3, s4]\n end\n end\n if s2 == :failed\n @scanner.pos = s0\n :failed\n else\n [s1, s2]\n end\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def parse_crlf data\n return if data == ''\n if ilf = data.index(\"\\n\")\n # if we find a LF and that LF is after a CR we first handle\n # the CR\n if icr = data.index(\"\\r\") and ilf != (icr+1) and icr < ilf\n parse_cr data, icr\n else\n parse_lf data, ilf\n end\n else\n if icr = data.index(\"\\r\")\n parse_cr data, icr\n else\n @linebuffer << data\n @outputbuffer.print data\n end\n end\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 354:6: ( '\\\\r' )? '\\\\n'\n # at line 354:6: ( '\\\\r' )?\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0 == 0xd )\n alt_6 = 1\n end\n case alt_6\n when 1\n # at line 354:6: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def line_token\n return nil unless md = MULTI_DENT.match(@chunk)\n\n @tokens.last.last.push newLine: true\n token :Terminator, \"\\n\"\n\n indent = md.to_a[0]\n num_newlines = count(indent, \"\\n\")\n spaces = indent.length - num_newlines\n\n @line += num_newlines\n\n movement = spaces - @indent\n if movement > 0\n @indents.push movement\n token :Indent, movement\n elsif movement < 0\n outdent_token movement.abs, num_newlines\n end\n\n @indent += movement\n indent.length\n end", "def killtoeol\n char = @text[@cursor, 1]\n if char == \"\\n\"\n killto :killtoeol do movetoright end\n else\n killto :killtoeol do movetoeol end\n end\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def nontrivial_end_line\n if successor\n successor.line_numbers.begin - 1\n else\n @document.last_non_empty_line\n end\n end", "def multi_line_comment\n # /*\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2a\n @pos += 2\n pos0 = @pos\n # */\n while (code = @codes[@pos] != 0x2a) or @codes[@pos + 1] != 0x2f\n raise ParseError.new(\"no `*/' at end of comment\", self) if code.nil?\n @pos += 1\n end\n @pos +=2\n return ECMA262::MultiLineComment.new(@codes[pos0...(@pos-2)].pack(\"U*\"))\n else\n nil\n end\n end", "def 
whitespace_token\n return if !(match = @chunk.match(WHITESPACE)) || (@chunk[0] == \"\\n\")\n prev = @tokens[-1]\n prev.send(match ? :spaced= : :new_line=, true) if prev\n match ? match[0].size : 0\n end", "def nextLine\r\n\t\twhile (@allLines[0] == \"\" || @allLines[0] == \"\\r\\n\" || @allLines[0] == \"\\n\")\r\n\t\t\[email protected]\r\n\t\tend\r\n\t\tif(@allLines[0]!=nil)\r\n\t\t\t@Line = @allLines[0]\r\n\t\t\[email protected]\r\n\t\t\tcheckSpace\r\n\t\tend\r\n\tend", "def newline_before_nonwhitespace(string)\n offset = -2\n while /\\S/.match(string[offset]).nil?\n return true if string[offset] == \"\\n\"\n offset -= 1\n end\n false\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def is_multiline?\n @code =~ /\\n/\n end", "def _EmptyLine\n\n _save = self.pos\n while true # sequence\n _tmp = scan(/\\G(?-mix:^)/)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_Comment)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_EofComment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_EmptyLine unless _tmp\n return _tmp\n end", "def next\n ret = peek_next\n @str.slice! @last_re if ret.type != :eos\n\n ret\n end", "def fresh_line?\n @content.empty? || @content[-1].eql?(NL)\n end", "def reduce_new_line(_production, _range, _tokens, _children)\n # TODO: control portability\n Regex::Character.new('\\n')\n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def remove_leading_newlines\n @tokens.shift if @tokens[0][0] == \"\\n\"\n end", "def eof?\n peek_lit(nil).nil?\n end", "def line_ending\n @line_ending ||= \"\\r\\n\"\n end", "def return_statement_tail\n expression if @enum.peek != Token.new(:symbol, ';')\n semicolon\n #@instruction.push(\";\")\n #@instruction.push(\"\\n\")\n end", "def test_multi_line_string\r\n test_string = \"a\\nb\\nc\"\r\n source = XfOOrth::StringSource.new(test_string)\r\n\r\n assert_equal(source.get, 'a')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'b')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'c')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, nil)\r\n assert(source.eoln?)\r\n assert(source.eof?)\r\n end", "def get_end_token tk # :nodoc:\n case tk[:kind]\n when :on_lparen\n token = RDoc::Parser::RipperStateLex::Token.new\n token[:kind] = :on_rparen\n token[:text] = ')'\n token\n when :on_rparen\n nil\n else\n token = RDoc::Parser::RipperStateLex::Token.new\n token[:kind] = :on_nl\n token[:text] = \"\\n\"\n token\n end\n end", "def _end_hyphen_of_hyphen_line\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\"\\\\r\\\\n\")\n break 
if _tmp\n self.pos = _save\n _tmp = match_string(\"\\\\n\")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"\\\\r\")\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_end_hyphen_of_hyphen_line unless _tmp\n return _tmp\n end", "def white_space\n if white_space?(@codes[@pos])\n begin\n @pos += 1\n end until !white_space?(@codes[@pos])\n return ECMA262::WhiteSpace.get\n else\n nil\n end\n end", "def test_rchomp_with_leading_newline\n string = \"\\nhello\"\n assert_eq @buffer.rchomp(string), 'hello'\n end", "def nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 51 )\n\n\n\n type = NL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 230:5: ( '\\\\n' )+\n # at file 230:5: ( '\\\\n' )+\n match_count_8 = 0\n while true\n alt_8 = 2\n look_8_0 = @input.peek( 1 )\n\n if ( look_8_0 == 0xa )\n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 230:5: '\\\\n'\n match( 0xa )\n\n else\n match_count_8 > 0 and break\n eee = EarlyExit(8)\n\n\n raise eee\n end\n match_count_8 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 51 )\n\n\n end", "def test_multi_line_string_again\r\n test_string = \"a\\nb\\nc\\n\"\r\n source = XfOOrth::StringSource.new(test_string)\r\n\r\n assert_equal(source.get, 'a')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'b')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'c')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, nil)\r\n assert(source.eoln?)\r\n assert(source.eof?)\r\n end", "def new_line?\n ary = insertion_point\n ary.empty? || ary.all? {|stmt| stmt.type == :code }\n end", "def single_line?; end", "def test_single_line_string\r\n test_string = \"abc\"\r\n source = XfOOrth::StringSource.new(test_string)\r\n\r\n assert_equal(source.get, 'a')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'b')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'c')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, nil)\r\n assert(source.eoln?)\r\n assert(source.eof?)\r\n end", "def NextChar\r\n\t\[email protected]!(0, 1)\r\n\tend", "def match_with_eol_regex\n return match_regex if resource[:exact]\n return /^(?>#{match_regex})#{$/}/\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? 
|| @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? || !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def string_literal\n # StringLiteral ::\n # \" DoubleStringCharactersopt \"\n # ' SingleStringCharactersopt '\n #\n # DoubleStringCharacters ::\n # DoubleStringCharacter DoubleStringCharactersopt\n #\n # SingleStringCharacters ::\n # SingleStringCharacter SingleStringCharactersopt\n #\n # DoubleStringCharacter ::\n # SourceCharacter but not one of \" or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n # SingleStringCharacter ::\n # SourceCharacter but not one of ' or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n if (code = @codes[@pos]) == 0x27 #'\n term = 0x27\n elsif code == 0x22 #\"\n term = 0x22\n else\n return nil\n end\n @pos += 1\n pos0 = @pos\n\n str = []\n while (code = @codes[@pos])\n if code.nil?\n raise ParseError.new(\"no `#{term}' at end of string\", self)\n elsif line_terminator?(code)\n raise ParseError.new(\"string has line terminator in body\", self)\n elsif code == 0x5c #\\\n @pos += 1\n str.push(escape_sequence)\n elsif code == term\n @pos += 1\n return ECMA262::ECMA262String.new(str.compact.pack(\"U*\"))\n else\n @pos += 1\n str.push(code)\n end\n end\n nil\n end", "def get\n @source_index += 1\n\n # Maintain line count.\n prev_char = @source_text[@source_index - 1]\n if @source_index.positive? 
&& prev_char == \"\\n\"\n @line_index += 1\n @col_index = -1\n end\n\n @col_index += 1\n char = if @source_index > @last_index\n # Read past the end of source text.\n END_MARK\n else\n @source_text[@source_index]\n end\n Character.new(char, @line_index, @col_index, @source_index, @source_text)\n end", "def lchomp!(match = /[\\r\\n]/)\n if index(match) == 0\n self[0...match.size] = ''\n self\n end\n end", "def emit_end\n ending = Statement.new(:code, :end)\n current = insertion_point\n\n can_skip_line = lambda do |line|\n line.empty? ||\n line.all? {|stmt| stmt.type == :text && stmt.value =~ /\\A\\s*\\z/ }\n end\n\n if can_skip_line[current]\n target = current\n\n # skip past empty whitespace in previous lines\n @source_lines.reverse_each do |line|\n break unless can_skip_line[line]\n target = line\n end\n\n target.unshift ending\n else\n current.push ending\n end\n end", "def is_multiline?(line) # ' '[0] == 32\n line && line.length > 1 && line[-1] == MULTILINE_CHAR_VALUE && line[-2] == 32\n end", "def _normalize_line_end(line)\n return line unless @usecrlf\n # p [ \"nleln\", line ]\n line_len = line.respond_to?(:bytesize) ? line.bytesize : line.length\n last2 = line[line_len-2...line_len]\n # p [ \"nlel2\", last2 ]\n return line unless last2 == \"\\r\\n\"\n return line[0...line_len-2] + \"\\n\"\n end", "def statement_end\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n return_value = StatementEndReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n char_literal42 = nil\n __EOF43__ = nil\n\n tree_for_char_literal42 = nil\n tree_for_EOF43 = nil\n\n begin\n # at line 324:3: ( ';' | ( '}' )=> | EOF )\n alt_7 = 3\n alt_7 = @dfa7.predict( @input )\n case alt_7\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 324:5: ';'\n char_literal42 = match( SEMI, TOKENS_FOLLOWING_SEMI_IN_statement_end_2150 )\n if @state.backtracking == 0\n\n tree_for_char_literal42 = @adaptor.create_with_payload( char_literal42 )\n @adaptor.add_child( root_0, tree_for_char_literal42 )\n\n end\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 325:5: ( '}' )=>\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 326:5: EOF\n __EOF43__ = match( EOF, TOKENS_FOLLOWING_EOF_IN_statement_end_2167 )\n if @state.backtracking == 0\n\n tree_for_EOF43 = @adaptor.create_with_payload( __EOF43__ )\n @adaptor.add_child( root_0, tree_for_EOF43 )\n\n end\n\n end# - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end\n \n return return_value\n end", "def last_line(src)\n if n = src.rindex(\"\\n\")\n src[(n+1) .. 
-1]\n else\n src\n end\nend", "def line_at(char)\n return nil unless char\n text[0..char].count(\"\\n\") + 1\n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def line_break\n append '(?:\\n|(?:\\r\\n))'\n end", "def cursor_eol\n # pcol is based on max length not current line's length\n @pcol = @content_cols - @cols - 1\n _arr = _getarray\n @curpos = _arr[@current_index].size\n @repaint_required = true\n end", "def count_trailing_newlines(text)\n if text.end_with? \"\\n\"\n count = 0\n\n text.reverse.chars do |c|\n if c == \"\\n\"\n count += 1\n else\n break\n end\n end\n\n count\n else\n 0\n end\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def line(pos = T.unsafe(nil)); end", "def rl_crlf()\r\n if (@_rl_term_cr)\r\n @_rl_out_stream.write(@_rl_term_cr)\r\n end\r\n @_rl_out_stream.write(\"\\n\")\r\n return 0\r\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 4 )\n\n\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 269:3: ';'\n match( 0x3b )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 4 )\n\n\n end", "def _linear_white_space\n _save = self.pos\n\n _save1 = self.pos\n while true # sequence\n _save2 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save2\n end\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n if _tmp\n while true\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save4\n end\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_linear_white_space unless _tmp\n return _tmp\n end", "def line_ending=(value)\n @line_ending = value || \"\\r\\n\"\n end", "def linebreak?(cell)\n cell.nonzero? 
&& (cell % columns).zero?\n end", "def _eof_comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eof)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_eof_comment unless _tmp\n return _tmp\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def parse_blank_line; end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def suppress_newlines\n @tokens.pop if value[0] == ?\\\n end", "def octal_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37\n @pos += 1\n pos0 = @pos\n while code = @codes[@pos] and code >= 0x30 and code <= 0x37\n @pos += 1\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(8))\n end\n else\n nil\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def end_line(kind); end", "def nextChar\n if getChar == \"\\n\"\n @line += 1\n @column = @lastLineSize\n end\n @index += 1\n @column += 1\n end", "def line\n return 1 unless lexing_context && locator\n locator.line_for_offset(lexing_context[:end_offset])\n end", "def line_token\n return unless match = @chunk.match(MULTI_DENT)\n\n indent, _ = *match\n size = indent.size - 1\n incomplete = unfinished\n\n #\n # elements.\n # ...foo. <- size: 4, indebt: 0, indent: 0\n # ...bar. <- size: 4, indebt: 4, indent: 4\n # -- case 1. finished && outdented\n # batman <- size: 0, indebt: 4, indent: 0\n # -- case 2. finished && same indent\n # ...batman size: 4, indebt: 4, indent: 4\n #\n if size - @indebt == @indent\n incomplete ? suppress_newlines : newline_token(0)\n return indent.size\n end\n\n if size > @indent\n if incomplete\n @indebt = size - @indent\n suppress_newlines\n return indent.size\n end\n if @tokens.size.zero?\n @base_indent = @indent = size\n return indent.size\n end\n indent_token(size, indent.size - size)\n elsif size < @base_indent\n raise_syntax_error! 'missing indentation'\n else\n @indebt = 0\n outdent_token(@indent - size, incomplete, indent.size)\n end\n\n indent.size\n end", "def test_rchomp_without_beginning_newline\n string = 'hello'\n assert_eq @buffer.rchomp(string), string\n end", "def lex_end line\n ends << line\n end", "def convert_blank(*)\n NEWLINE\n end", "def line(node = @current_node)\n\t\treturn is_valid(node) ? 
node.line : nil\n\tend", "def end_of_line\n self.cursor = :end\n end", "def kill_line(*)\n if current_buffer[current_buffer.point] == \"\\n\"\n current_buffer.slice!(current_buffer.point)\n return true\n end\n\n line_end = current_buffer.index(/$/, current_buffer.point)\n current_buffer.slice!(current_buffer.point, line_end - current_buffer.point)\n true\nend", "def find_end_line(node)\n if node.if_type? && node.else?\n node.loc.else.line\n elsif node.if_type? && node.ternary?\n node.else_branch.loc.line\n elsif node.if_type? && node.elsif?\n node.each_ancestor(:if).find(&:if?).loc.end.line\n elsif node.block_type? || node.numblock_type?\n node.loc.end.line\n elsif (next_sibling = node.right_sibling) && next_sibling.is_a?(AST::Node)\n next_sibling.loc.line\n elsif (parent = node.parent)\n parent.loc.respond_to?(:end) && parent.loc.end ? parent.loc.end.line : parent.loc.line\n else\n node.loc.end.line\n end\n end", "def _BlockDelimiter\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_BlockHead)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_BlockEnd)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_BlockDelimiter unless _tmp\n return _tmp\n end", "def reduce_carriage_return(_production, _range, _tokens, _children)\n Regex::Character.new('\\r')\n end", "def line\n\t return -1 if @inputStack.empty? # only if initialize() arg is bogus\n\n\t input = @inputStack[0] # not @inputStack.last\n\t str = input.string[0 .. input.pos]\n\t return str.count(\"\\n\") + 1\n\tend", "def _ParagraphDelimiter\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_BlockDelimiter)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_PreformattedCommandHead)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_LineBlock)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Newpage)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_HeadedStart)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_ParagraphDelimiter unless _tmp\n return _tmp\n end", "def line_numbers\n return (line..line) unless @value && text\n\n end_line = if !lines.empty?\n line + lines.count - 1\n elsif children.empty?\n nontrivial_end_line\n else\n line\n end\n\n (line..end_line)\n end", "def terminator; end", "def terminator; end", "def parse_nl\n s0 = @scanner.pos\n s2 = parse_newline\n if s2 == :failed\n s1 = :failed\n else\n s1 = []\n while s2 != :failed\n s1 << s2\n s2 = parse_newline\n end\n end\n if s1 == :failed\n @scanner.pos = s0\n :failed\n else\n s2 = []\n s3 = parse_skipline\n while s3 != :failed\n s2 << s3\n s3 = parse_skipline\n end\n [s1, s2]\n end\n end", "def next()\n return \" \" unless has_next()\n if(@count <= 0)\n @char = @compressed_string[@i]\n @i += 1\n @count = get_count()\n end\n @count -= 1\n return @char\n end", "def newline!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 100 )\n\n type = NEWLINE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 914:5: ( '\\\\n' | '\\\\r' )+\n # at file 914:5: ( '\\\\n' | '\\\\r' )+\n match_count_29 = 0\n while true\n alt_29 = 2\n look_29_0 = @input.peek( 1 )\n\n if ( look_29_0 == 0xa || look_29_0 == 0xd )\n alt_29 = 1\n\n end\n case alt_29\n when 1\n # at line \n if @input.peek(1) == 0xa || @input.peek(1) == 0xd\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n match_count_29 > 0 and break\n eee = EarlyExit(29)\n\n\n raise eee\n end\n match_count_29 += 1\n end\n\n # --> 
action\n channel = HIDDEN \n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 100 )\n\n end" ]
[ "0.62339807", "0.6189396", "0.58698416", "0.5824755", "0.58021665", "0.5767977", "0.5617591", "0.5596281", "0.5551471", "0.5545358", "0.5520693", "0.54861367", "0.54738474", "0.54681635", "0.5443173", "0.5406055", "0.5393491", "0.5372871", "0.5351362", "0.53352094", "0.53298765", "0.5320925", "0.53029287", "0.5295505", "0.5277303", "0.524932", "0.5246897", "0.5217729", "0.52129686", "0.51858354", "0.51670665", "0.51620775", "0.5159024", "0.51513684", "0.5145809", "0.5136168", "0.5103626", "0.5085088", "0.5082034", "0.5076625", "0.5068639", "0.50635624", "0.50598544", "0.5059427", "0.5039771", "0.5032518", "0.50324696", "0.4992197", "0.49753362", "0.49647546", "0.49617195", "0.49592695", "0.49298945", "0.4919539", "0.49090913", "0.49068853", "0.49063024", "0.49012384", "0.4893213", "0.4883903", "0.48822308", "0.48728472", "0.48658738", "0.48548582", "0.48542878", "0.4848516", "0.48445186", "0.48278978", "0.48272187", "0.48229128", "0.48170575", "0.48136887", "0.4809681", "0.48078382", "0.47992557", "0.479541", "0.47921345", "0.47921345", "0.47921345", "0.4790187", "0.47899592", "0.478978", "0.478664", "0.4786427", "0.47843832", "0.47790343", "0.47741568", "0.47554177", "0.47445384", "0.4743387", "0.47402534", "0.4733325", "0.47308847", "0.47253728", "0.4710179", "0.470813", "0.470813", "0.47045702", "0.47042117", "0.47007152" ]
0.8169687
0
Tests whether the next literal is a Comment. If it is, returns an ECMA262::MultiLineComment or SingleLineComment object and advances the lexical parser position; otherwise returns nil and the position is not changed.
def comment multi_line_comment || single_line_comment end
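A hedged sketch of how this one-line delegation resolves in practice, with simplified stand-ins for the two branch methods. MiniCommentLexer, its Comment struct, and the string-based scanning are assumptions for illustration; the library's actual multi_line_comment and single_line_comment operate on the @codes codepoint array and build ECMA262::MultiLineComment / ECMA262::SingleLineComment nodes.

class MiniCommentLexer
  Comment = Struct.new(:text)

  def initialize(source)
    @src = source
    @pos = 0
  end

  # Mirrors the delegation above: try the multi-line form first,
  # fall back to the single-line form, else nil.
  def comment
    multi_line_comment || single_line_comment
  end

  private

  # Consumes "/* ... */"; raises if the comment is never closed,
  # echoing the "no `*/' at end of comment" error seen in the negatives.
  def multi_line_comment
    return nil unless @src[@pos, 2] == "/*"
    close = @src.index("*/", @pos + 2) or raise "no `*/' at end of comment"
    body = @src[(@pos + 2)...close]
    @pos = close + 2
    Comment.new(body)
  end

  # Consumes "//" up to, but not including, the next line terminator.
  def single_line_comment
    return nil unless @src[@pos, 2] == "//"
    nl = @src.index(/[\n\r\u2028\u2029]/, @pos + 2) || @src.length
    body = @src[(@pos + 2)...nl]
    @pos = nl
    Comment.new(body)
  end
end

lexer = MiniCommentLexer.new("/* a */// b\nx")
p lexer.comment.text # => " a "  (multi-line branch consumed first)
p lexer.comment.text # => " b"   (then the single-line branch)
p lexer.comment      # => nil    ("x" is no comment; @pos is unchanged)

Because multi_line_comment is tried first and each branch returns nil without moving @pos on a non-match, the || chain preserves the documented contract: one comment node on success, nil with an unchanged position otherwise.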
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_comment\n return false unless @lexer.get and @lexer.get.type == :comment_start\n @lexer.next!\n\n buf = ''\n while token = @lexer.get\n break if token.type == :comment_end\n buf << token.value\n @lexer.next!\n end\n\n found :comment, buf\n @lexer.next!\n true\n end", "def single_line_comment\n # //\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2f\n @pos += 2\n pos0 = @pos\n while (code = @codes[@pos]) and !line_terminator?(code)\n @pos += 1\n end\n return ECMA262::SingleLineComment.new(@codes[pos0...@pos].pack(\"U*\"))\n else\n nil\n end\n end", "def _Comment\n\n _save = self.pos\n while true # sequence\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"//\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_Nl)\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_Nl)\n unless _tmp\n self.pos = _save\n break\n end\n while true\n _tmp = apply(:_EmptyLine)\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_Comment unless _tmp\n return _tmp\n end", "def multi_line_comment\n # /*\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2a\n @pos += 2\n pos0 = @pos\n # */\n while (code = @codes[@pos] != 0x2a) or @codes[@pos + 1] != 0x2f\n raise ParseError.new(\"no `*/' at end of comment\", self) if code.nil?\n @pos += 1\n end\n @pos +=2\n return ECMA262::MultiLineComment.new(@codes[pos0...(@pos-2)].pack(\"U*\"))\n else\n nil\n end\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"#\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_eol)\n _tmp = _tmp ? 
nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_eol)\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"(\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # choice\n _tmp = apply(:_ctext)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_quoted_pair)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save2\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\")\")\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eol)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n break unless _tmp\n _tmp = apply(:_eol)\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def comment?\n return @assigned_paragraph_type == :comment if @assigned_paragraph_type\n return block_type.casecmp(\"COMMENT\") if begin_block? or end_block?\n return @line =~ /^[ \\t]*?#[ \\t]/\n end", "def consume_comments\n if @s.peek(2) == '/*'\n @s.consume\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n # Parse error.\n text = @s.consume_rest\n end\n\n return create_token(:comment, :value => text)\n end\n\n nil\n end", "def comment?\n @kind == :line_comment || @kind == :block_comment\n end", "def parse_comment\n s0 = @scanner.pos\n if match_str('*') == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n s2 = parse_nonls\n if parse_nl == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n @reported_pos = s0\n s0 = s2.join\n end\n end\n if s0 == :failed\n s0 = @scanner.pos\n s1 = match_str('&')\n if s1 == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n s2 = parse_nonls\n if parse_nl == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n @reported_pos = s0\n s0 = '&' + s2.join\n end\n end\n end\n s0\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"#\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_end_hyphen_of_hyphen_line)\n _tmp = _tmp ? 
nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = apply(:_utf8)\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def comment?\n @contents[0] == :comment\n end", "def comment?\n type == COMMENT_NODE\n end", "def comment?\n node_type == COMMENT_NODE\n end", "def detect_comments\n if @input =~ %r{^\\s*[/]{2}}\n @mode = :comment\n @expression = ''\n end\n end", "def read_line_comment(token)\n token.kind = :line_comment\n read_next() while (current = peek_next()) && current != ?\\n\n token.value = @source[token.from .. @marker.source_index] if !@skip_comments\n end", "def _HtmlComment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"<!--\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = match_string(\"-->\")\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"-->\")\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_HtmlComment unless _tmp\n return _tmp\n end", "def extract_comment comment\n case comment\n when Array then\n comment.map do |c|\n extract_comment c\n end\n when nil\n RDoc::Comment.new ''\n when RDoc::Comment then\n if comment.text =~ /^#[ \\t]*:section:.*\\n/ then\n start = $`\n rest = $'\n\n comment.text = if start.empty? then\n rest\n else\n rest.sub(/#{start.chomp}\\Z/, '')\n end\n end\n\n comment\n when RDoc::Markup::Document then\n comment\n else\n raise TypeError, \"unknown comment #{comment.inspect}\"\n end\n end", "def comment\n comment = buffer.options.comment_line.to_s\n indent = nil\n lines = []\n\n each_line do |line, fc, tc|\n line_fc = \"#{line}.#{fc}\"\n line_tc = \"#{line}.#{tc}\"\n\n next if buffer.at_end == line_tc\n\n lines << line\n\n next if indent == 0 # can't get lower\n\n line = buffer.get(\"#{line}.#{fc}\", \"#{line}.#{tc}\")\n\n next unless start = line =~ /\\S/\n\n indent ||= start\n indent = start if start < indent\n end\n\n indent ||= 0\n\n buffer.undo_record do |record|\n lines.each do |line|\n record.insert(\"#{line}.#{indent}\", comment)\n end\n end\n end", "def comment_token\n return unless match = @chunk.match(COMMENT)\n # _, comment = *match\n # token(:COMMENT, comment, 0, comment.size)\n match[0].size\n end", "def process_initial_comment(tk)\n if @statement.empty? && (@comments_last_line || 0) < tk.line_no - 2\n @comments = nil\n end\n\n return unless tk.class == TkCOMMENT\n\n case tk.text\n when Parser::SourceParser::SHEBANG_LINE\n if !@last_ns_tk && !@encoding_line\n @shebang_line = tk.text\n return\n end\n when Parser::SourceParser::ENCODING_LINE\n if (@last_ns_tk.class == TkCOMMENT && @last_ns_tk.text == @shebang_line) ||\n !@last_ns_tk\n @encoding_line = tk.text\n return\n end\n end\n\n return if [email protected]? 
&& @comments\n return if @first_line && tk.line_no > @first_line\n\n if @comments_last_line && @comments_last_line < tk.line_no - 1\n if @comments && @statement.empty?\n @tokens.unshift(tk)\n return @done = true\n end\n @comments = nil\n end\n @comments_line = tk.line_no unless @comments\n\n # Remove the \"#\" and up to 1 space before the text\n # Since, of course, the convention is to have \"# text\"\n # and not \"#text\", which I deem ugly (you heard it here first)\n @comments ||= []\n if tk.text.start_with?('=begin')\n lines = tk.text.count(\"\\n\")\n @comments += tk.text.gsub(/\\A=begin.*\\r?\\n|\\r?\\n=end.*\\r?\\n?\\Z/, '').split(/\\r?\\n/)\n @comments_last_line = tk.line_no + lines\n else\n @comments << tk.text.gsub(/^(#+)\\s{0,1}/, '')\n @comments_hash_flag = $1 == '##' if @comments_hash_flag.nil?\n @comments_last_line = tk.line_no\n end\n @comments.pop if @comments.size == 1 && @comments.first =~ /^\\s*$/\n true\n end", "def parse_comments\n @data[4][0]\n end", "def ml_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 40)\n\n type = ML_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 478:4: '/*' ( . )* '*/'\n match(\"/*\")\n # --> action\n if @input.peek(1) == ?* then type = DOC_COMMENT else channel = HIDDEN end \n # <-- action\n # at line 478:88: ( . )*\n loop do #loop 4\n alt_4 = 2\n look_4_0 = @input.peek(1)\n\n if (look_4_0 == ?*) \n look_4_1 = @input.peek(2)\n\n if (look_4_1 == ?/) \n alt_4 = 2\n elsif (look_4_1.between?(0x0000, ?.) || look_4_1.between?(?0, 0xFFFF)) \n alt_4 = 1\n\n end\n elsif (look_4_0.between?(0x0000, ?)) || look_4_0.between?(?+, 0xFFFF)) \n alt_4 = 1\n\n end\n case alt_4\n when 1\n # at line 478:88: .\n match_any\n\n else\n break #loop 4\n end\n end\n match(\"*/\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 40)\n\n end", "def lex\n @index += 1\n while lexer.tokens[@index] === :COMMENT\n @index += 1\n end\n lexer.tokens[@index] or unexpected_error(:EOF)\n end", "def _comment\n\n _save = self.pos\n while true # choice\n _tmp = scan(/\\A(?-mix:--.*?$)/)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_multi_comment)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def collect_first_comment\n skip_tkspace\n comment = ''.dup\n comment = RDoc::Encoding.change_encoding comment, @encoding if @encoding\n first_line = true\n first_comment_tk_kind = nil\n line_no = nil\n\n tk = get_tk\n\n while tk && (:on_comment == tk[:kind] or :on_embdoc == tk[:kind])\n comment_body = retrieve_comment_body(tk)\n if first_line and comment_body =~ /\\A#!/ then\n skip_tkspace\n tk = get_tk\n elsif first_line and comment_body =~ /\\A#\\s*-\\*-/ then\n first_line = false\n skip_tkspace\n tk = get_tk\n else\n break if first_comment_tk_kind and not first_comment_tk_kind === tk[:kind]\n first_comment_tk_kind = tk[:kind]\n\n line_no = tk[:line_no] if first_line\n first_line = false\n comment << comment_body\n tk = get_tk\n\n if :on_nl === tk then\n skip_tkspace_without_nl\n tk = get_tk\n end\n end\n end\n\n unget_tk tk\n\n new_comment comment, line_no\n end", "def comment(_lexeme, character)\n if character =~ /./\n :comment\n else\n :default\n end\n end", "def parse_comment container, tk, comment\n return parse_comment_tomdoc container, tk, comment if @markup == 'tomdoc'\n column = tk[:char_no]\n line_no = 
comment.line.nil? ? tk[:line_no] : comment.line\n\n comment.text = comment.text.sub(/(^# +:?)(singleton-)(method:)/, '\\1\\3')\n singleton = !!$~\n\n co =\n if (comment.text = comment.text.sub(/^# +:?method: *(\\S*).*?\\n/i, '')) && !!$~ then\n line_no += $`.count(\"\\n\")\n parse_comment_ghost container, comment.text, $1, column, line_no, comment\n elsif (comment.text = comment.text.sub(/# +:?(attr(_reader|_writer|_accessor)?): *(\\S*).*?\\n/i, '')) && !!$~ then\n parse_comment_attr container, $1, $3, comment\n end\n\n if co then\n co.singleton = singleton\n co.line = line_no\n end\n\n true\n end", "def read_block_comment(token)\n token.kind = :block_comment\n\n read_next()\n while (current = read_next())\n if current == ?* && peek_next() == ?/\n current = read_next()\n break\n end\n end\n\n raise_error(:unterminated_block_comment, \"Unterminated block comment\") if !current\n token.value = @source[token.from .. @marker.source_index] if !@skip_comments\n end", "def comment\n @comment ||= begin\n space = node.previous_sibling and\n space.to_s.blank? && space.to_s.count(\"\\n\") == 1 and\n comment_node = space.previous_sibling\n\n if comment_node.is_a?(REXML::Comment)\n doc.restore_erb_scriptlets(comment_node.to_s.strip)\n end\n end\n end", "def lex_comment(input)\n case input\n when /\\A\\s*\\/\\// # single line comment //\n if /\\A\\s*(\\/\\/.*?\\n)/.match(input)\n return :comment, $1, $'\n else\n return :open, '//', input\n end\n when /\\A\\s*\\/\\*/m # multi-line comment /* */\n if /\\A\\s*(\\/\\*.*?\\*\\/)/m.match(input)\n return :comment, $1, $'\n else\n return :open, '/*', input\n end\n when /\\A\\s*@\"/ # objective C string\n if :objective_c == @language\n token, value, rest = lex_string($')\n if :open == token\n return :open, '@\"', input\n elsif :error == token\n return :error, nil, input\n else\n return :string, '@\"' + value, rest\n end\n else\n return :error, nil, input\n end\n when /\\A\\s*\"/ # double quoted string \" \"\n token, value, rest = lex_string($')\n if :open == token\n return :open, '\"', input\n elsif :error == token\n return :error, nil, input\n else\n return :string, '\"' + value, rest\n end\n when /\\A\\s*'/ # char literal ' '\n token, value, rest = lex_char($')\n if :open == token\n return :open, \"'\", input\n elsif :error == token\n return :error, nil, input\n else\n return :char, \"'\" + value, rest\n end\n when /\\A\\s*(@#{@regex_identifier})/ # objective c directive\n value, rest = $1, $'\n if @keywords.include?(value)\n return :keyword, value, rest\n else\n return :error, nil, input\n end\n when /\\A\\s*(#{@regex_identifier})/\n value, rest = $1, $'\n if @keywords.include?(value)\n return :keyword, value, rest\n elsif @unique_tokens.has_key?(value)\n return @unique_tokens[value], value, rest\n else\n return :identifier, value, rest\n end\n when /\\A\\s*(#{@regex_float})/\n return :float, $1, $'\n when /\\A\\s*(#{@regex_hex_float})/\n return :float, $1, $'\n when /\\A\\s*(#{@regex_integer})/\n return :integer, $1, $'\n when /\\A\\s*(\\S.*)\\z/m\n val, rest = lex_punctuator($1)\n if val\n return :punctuator, val, rest\n else\n return :error, nil, input\n end\n else\n return :end\n end\n end", "def commentState\n data = @stream.char()\n if data == \"-\"\n @state = @states[\"commentDash\"]\n elsif data == EOF\n # XXX parse error\n @tokenQueue.append(@currentToken)\n @state = @states[\"data\"]\n else\n @currentToken[\"data\"] += data + @stream.charsUntil(\"-\")\n end\n return true\n end", "def comment_token\n if md=HERE_COMMENT.match(@chunk)\n input, comment, 
body = md.to_a\n token :HereComment, body, :newLine => true\n token :Terminator, \"\\n\"\n @line += count(comment, \"\\n\")\n return comment.length\n elsif md=COMMENT.match(@chunk)\n input, comment, body = md.to_a\n token :Comment, body\n return comment.length\n end\n\n return nil\n end", "def is_comment?\n self.object_type.downcase.to_s == \"comment\" || self.object_type.blank?\n end", "def _eof_comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eof)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_eof_comment unless _tmp\n return _tmp\n end", "def parse_with_comments(source_buffer); end", "def parse_with_comments(source_buffer); end", "def parse_with_comments(source_buffer); end", "def consume_comment(input)\n while not input.eof? do\n case input.look_ahead\n when \"\\\\\" : \n # In comments, only escaped backslashes and line endings matter\n if [\"\\n\", \"\\\\\"].include? input.look_ahead(1)\n input.consume\n end\n when \"\\n\" : input.consume; break \n end\n input.consume\n end\n end", "def visit_comment(node)\n line = @original_haml_lines[node.line - 1]\n indent = line.index(/\\S/)\n @ruby_chunks << PlaceholderMarkerChunk.new(node, 'comment', indent: indent)\n end", "def is_comment(line)\n result = false\n # If source supports single line comments\n if comment_symbols[:single_line]\n result ||= line =~ /^\\s*#{Regexp.escape(comment_symbols[:single_line])}/\n end\n\n # If source supports multi-line comments\n if comment_symbols[:multiline]\n result ||= line =~ /^\\s*#{Regexp.escape(comment_symbols[:multiline][:begin])}/\n end\n result\n end", "def process_block_comment!(index, tokens, ranges)\n start_index = index\n line_num = line_for_offset(tokens[index][1])\n while index < tokens.length - 2\n break unless tokens[index + 1][0] == :TOKEN_COMMENT\n next_line = line_for_offset(tokens[index + 1][1])\n # Tokens must be on contiguous lines\n break unless next_line == line_num + 1\n # Must not be a region comment\n comment = extract_text(tokens[index + 1][1])\n break if start_region?(comment) || end_region?(comment)\n # It's a block comment\n line_num = next_line\n index += 1\n end\n\n return index if start_index == index\n\n add_range!(create_range_span_tokens(tokens[start_index][1], tokens[index][1], REGION_COMMENT), ranges)\n index\n end", "def comment?\n [\"text\", \"text_wholepage\"].include?(@type)\n end", "def visit_Comment(comment, *rest)\n end", "def preceding_comment?(node1, node2); end", "def parse_comment(raw)\n c = nil\n if raw =~ /\\A\\((.+?)\\)(.+)\\z/\n c, raw = [$2, $1]\n end\n if raw =~ /\\A(.+)\\((.+?)\\)\\z/\n raw, c = [$1, $2]\n end\n [raw, c]\n end", "def wrap_final_comment\n current = wrap_rwhitespace(whitespaces: /\\A[ \\t\\r\\f]+/)\n if @source_buffer.slice(current.end_pos) != '#'\n # No comment, do nothing\n return self\n end\n comment = @source_buffer.slice(current.end_pos..-1)[/\\A[^\\n]+/] || ''\n current.adjust(end_pos: comment.size)\n end", "def is_comment?(line)\n line =~ /^\\s*#/\n end", "def first_is_comment?(page)\n\t\treturn false unless page || page.list || page.list[0]\n\t\treturn false unless page.list[0].code == 108\n\t\tindex = 1\n\t\tlist = 
[page.list[0].parameters[0]]\n\t\twhile page.list[index].code == 408\n\t\t\tlist << page.list[index].parameters[0]\n\t\t\tindex += 1\n\t\tend\n\t\treturn list.collect{|line|line+=\" \"}.join\n\tend", "def is_comment?(line)\n true if line =~ /^\\#.*$/\n end", "def parse_with_comments(source_buffer)\n @lexer.comments = []\n\n [ parse(source_buffer), @lexer.comments ]\n ensure\n @lexer.comments = nil\n end", "def parse_comments(result)\n parse_type(result, \"comment\")\n end", "def comment(string)\n case string.strip # strip leading and trailing whitespaces\n when /^body=\"start\"/ # match starting comment\n @interesting = true\n when /^body=\"end\"/\n @interesting = false # match closing comment\n end\n end", "def comment(string)\n case string.strip # strip leading and trailing whitespaces\n when /^body=\"start\"/ # match starting comment\n @interesting = true\n when /^body=\"end\"/\n @interesting = false # match closing comment\n end\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 49 )\n\n\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 228:11: '//' ( . )* ( '\\\\n' | '\\\\r' )\n match( \"//\" )\n\n # at line 228:16: ( . )*\n while true # decision 6\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0 == 0xa || look_6_0 == 0xd )\n alt_6 = 2\n elsif ( look_6_0.between?( 0x0, 0x9 ) || look_6_0.between?( 0xb, 0xc ) || look_6_0.between?( 0xe, 0xffff ) )\n alt_6 = 1\n\n end\n case alt_6\n when 1\n # at line 228:16: .\n match_any\n\n else\n break # out of loop for decision 6\n end\n end # loop for decision 6\n\n if @input.peek(1) == 0xa || @input.peek(1) == 0xd\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n # --> action\n channel = HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 49 )\n\n\n end", "def lex_comment line\n # do nothing\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 13 )\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 63:5: '#' (~ ( '\\\\r' | '\\\\n' ) )*\n match( 0x23 )\n # at line 63:9: (~ ( '\\\\r' | '\\\\n' ) )*\n while true # decision 22\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x0, 0x9 ) || look_22_0.between?( 0xb, 0xc ) || look_22_0.between?( 0xe, 0xffff ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line 63:11: ~ ( '\\\\r' | '\\\\n' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 22\n end\n end # loop for decision 22\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 13 )\n\n end", "def comment(string); end", "def comment(string); end", "def comment(string); end", "def comment(string); end", "def comment_line?(line_source); end", "def comment_line?(line_source); end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 15)\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - 
-\n # at line 148:3: ( ( '#' | '//' ) (~ '\\\\n' )* | '/*' ( . )* '*/' )\n alt_10 = 2\n look_10_0 = @input.peek(1)\n\n if (look_10_0 == ?#) \n alt_10 = 1\n elsif (look_10_0 == ?/) \n look_10_2 = @input.peek(2)\n\n if (look_10_2 == ?/) \n alt_10 = 1\n elsif (look_10_2 == ?*) \n alt_10 = 2\n else\n nvae = NoViableAlternative(\"\", 10, 2)\n raise nvae\n end\n else\n nvae = NoViableAlternative(\"\", 10, 0)\n raise nvae\n end\n case alt_10\n when 1\n # at line 148:5: ( '#' | '//' ) (~ '\\\\n' )*\n # at line 148:5: ( '#' | '//' )\n alt_7 = 2\n look_7_0 = @input.peek(1)\n\n if (look_7_0 == ?#) \n alt_7 = 1\n elsif (look_7_0 == ?/) \n alt_7 = 2\n else\n nvae = NoViableAlternative(\"\", 7, 0)\n raise nvae\n end\n case alt_7\n when 1\n # at line 148:7: '#'\n match(?#)\n\n when 2\n # at line 148:13: '//'\n match(\"//\")\n\n end\n # at line 148:20: (~ '\\\\n' )*\n while true # decision 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0.between?(0x0000, ?\\t) || look_8_0.between?(0x000B, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 148:20: ~ '\\\\n'\n if @input.peek(1).between?(0x0000, ?\\t) || @input.peek(1).between?(0x000B, 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 8\n end\n end # loop for decision 8\n\n when 2\n # at line 149:5: '/*' ( . )* '*/'\n match(\"/*\")\n # at line 149:10: ( . )*\n while true # decision 9\n alt_9 = 2\n look_9_0 = @input.peek(1)\n\n if (look_9_0 == ?*) \n look_9_1 = @input.peek(2)\n\n if (look_9_1 == ?/) \n alt_9 = 2\n elsif (look_9_1.between?(0x0000, ?.) || look_9_1.between?(?0, 0xFFFF)) \n alt_9 = 1\n\n end\n elsif (look_9_0.between?(0x0000, ?)) || look_9_0.between?(?+, 0xFFFF)) \n alt_9 = 1\n\n end\n case alt_9\n when 1\n # at line 149:10: .\n match_any\n\n else\n break # out of loop for decision 9\n end\n end # loop for decision 9\n match(\"*/\")\n\n end\n \n @state.type = type\n @state.channel = channel\n # --> action\n skip \n # <-- action\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 15)\n\n end", "def adjust_comments\n scan_tokens do |prev, token, post, i|\n next 1 unless token[0] == :COMMENT\n before, after = @tokens[i - 2], @tokens[i + 2]\n if before && after &&\n ((before[0] == :INDENT && after[0] == :OUTDENT) ||\n (before[0] == :OUTDENT && after[0] == :INDENT)) &&\n before[1] == after[1]\n @tokens.delete_at(i + 2)\n @tokens.delete_at(i - 2)\n next 0\n elsif prev[0] == \"\\n\" && [:INDENT].include?(after[0])\n @tokens.delete_at(i + 2)\n @tokens[i - 1] = after\n next 1\n elsif ![\"\\n\", :INDENT, :OUTDENT].include?(prev[0])\n @tokens.insert(i, [\"\\n\", Value.new(\"\\n\", token[1].line)])\n next 2\n else\n next 1\n end\n end\n end", "def parse comment_location\n case comment_location\n when String then\n super\n when Array then\n docs = comment_location.map do |comment, location|\n doc = super comment\n doc.file = location\n doc\n end\n\n RDoc::Markup::Document.new(*docs)\n when RDoc::Comment then\n doc = super comment_location.text, comment_location.format\n doc.file = comment_location.location\n doc\n when RDoc::Markup::Document then\n return comment_location\n else\n raise ArgumentError, \"unknown comment class #{comment_location.class}\"\n end\n end", "def comment\n @comment ||= client.issue_comments(repository, pr_id).detect do |comment|\n UrlSectionBuilder.match?(comment[:body])\n end\n end", "def parse_with_comments(source); end", "def comment?; end", "def comment?; end", "def 
bogusCommentState\n @tokenQueue << {\"type\"=> \"Comment\", \"data\"=> @stream.charsUntil(\">\")}\n @stream.char()\n @state = @states[\"data\"]\n return true\n end", "def parse_comments(comments); end", "def comment_begins?\n\t\t\n\tend", "def associated_comment?(node, ast_with_comments)\n return false if ast_with_comments[node].empty?\n\n preceding_comment = ast_with_comments[node].last\n distance = node.loc.keyword.line - preceding_comment.loc.line\n return false if distance > 1\n\n !annotation?(preceding_comment)\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 35 )\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 350:9: '#' (~ ( '\\\\n' | '\\\\r' ) )*\n match( 0x23 )\n # at line 350:13: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 13\n alt_13 = 2\n look_13_0 = @input.peek( 1 )\n\n if ( look_13_0.between?( 0x0, 0x9 ) || look_13_0.between?( 0xb, 0xc ) || look_13_0.between?( 0xe, 0xffff ) )\n alt_13 = 1\n\n end\n case alt_13\n when 1\n # at line 350:13: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 13\n end\n end # loop for decision 13\n # --> action\n channel=HIDDEN;\n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 35 )\n\n end", "def add_comment comment\n comment = extract_comment comment\n\n return if comment.empty?\n\n case comment\n when RDoc::Comment then\n @comments << comment\n when RDoc::Markup::Document then\n @comments.concat comment.parts\n when Array then\n @comments.concat comment\n else\n raise TypeError, \"unknown comment type: #{comment.inspect}\"\n end\n end", "def nodoc_comment?(node, require_all: T.unsafe(nil)); end", "def remove_comment comment\n return if @comments.empty?\n\n case @comments\n when Array then\n @comments.delete_if do |my_comment|\n my_comment.file == comment.file\n end\n when RDoc::Markup::Document then\n @comments.parts.delete_if do |document|\n document.file == comment.file.name\n end\n else\n raise RDoc::Error, \"BUG: unknown comment class #{@comments.class}\"\n end\n end", "def first_comment\n get_or_make_reference('Comment', @data, 'first_comment_id')\n end", "def first_comment\n get_or_make_reference('Comment', @data, 'first_comment_id')\n end", "def multiline_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 50 )\n\n\n\n type = MULTILINE_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 229:21: '/*' ( . )* '*/'\n match( \"/*\" )\n\n # at line 229:26: ( . 
)*\n while true # decision 7\n alt_7 = 2\n look_7_0 = @input.peek( 1 )\n\n if ( look_7_0 == 0x2a )\n look_7_1 = @input.peek( 2 )\n\n if ( look_7_1 == 0x2f )\n alt_7 = 2\n elsif ( look_7_1.between?( 0x0, 0x2e ) || look_7_1.between?( 0x30, 0xffff ) )\n alt_7 = 1\n\n end\n elsif ( look_7_0.between?( 0x0, 0x29 ) || look_7_0.between?( 0x2b, 0xffff ) )\n alt_7 = 1\n\n end\n case alt_7\n when 1\n # at line 229:26: .\n match_any\n\n else\n break # out of loop for decision 7\n end\n end # loop for decision 7\n\n\n match( \"*/\" )\n\n\n # --> action\n channel = HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 50 )\n\n\n end", "def associated_comment?(node, ast_with_comments)\n preceding_comments = preceding_comments(node, ast_with_comments)\n return false if preceding_comments.empty?\n\n distance = node.loc.keyword.line - preceding_comments.last.loc.line\n return false if distance > 1\n return false unless comment_line_only?(preceding_comments.last)\n\n # As long as there's at least one comment line that isn't an\n # annotation, it's OK.\n preceding_comments.any? { |comment| !annotation?(comment) }\n end", "def parse\n case @comments\n when String then\n super\n when Array then\n docs = @comments.map do |comment, location|\n doc = super comment\n doc.file = location if location\n doc\n end\n\n RDoc::Markup::Document.new(*docs)\n when RDoc::Comment then\n doc = super @comments.text, comments.format\n doc.file = @comments.location\n doc\n when RDoc::Markup::Document then\n return @comments\n else\n raise ArgumentError, \"unknown comment class #{comments.class}\"\n end\n end", "def allow_comments?\n case @comments\n when :guess\n @prompt && [email protected]? && !end_chars.include?('#')\n else\n @comments\n end\n end", "def comment\n decode_string_member(:comment)\n end", "def comment(str)\n cnode = Node.new(nil, COMMENT_NAME, {}, str)\n delegate.comment((DOCUMENT_NODE + path + [cnode]).compact, self, cnode) if delegate.respond_to?(:comment)\n end", "def consume_comments; end", "def current_comment_block\n @current_comment_block ||= current_comment ? comment_blocks.find { |block| block.include?(current_comment) } : []\n end", "def adjacent_comments(comments, buffer)\n comments = comments.sort_by { |comment| comment.location.begin_pos }\n current_chunk = 0\n last_line_seen = -100\n chunks_to_comment = comments.chunk do |comment|\n line = comment.location.begin_pos.line\n if last_line_seen.next == line\n last_line_seen = line\n current_chunk\n else\n last_line_seen = line\n current_chunk += 1\n end\n end\n chunks_to_comment.map &:last\n end", "def has_comment?(line)\n line =~ /#[^{]/\n end", "def get_comment\n\t\treturn @comment\n\tend", "def type comment\n comment.nil? ? 
'Solution' : 'Comment'\n end", "def new_comment comment, line_no = nil\n c = RDoc::Comment.new comment, @top_level, :ruby\n c.line = line_no\n c.format = @markup\n c\n end", "def comment_code\n block_match = /\\{([^\\{\\}]*)\\}/\n matches = @code.scan(block_match)\n return if matches.size != 1\n \n block = matches[0][0].to_s\n @code.gsub!(block_match, \"{\\n#{comment_struct_list(block)}#{@indent}}\")\n end", "def load_comment(name)\n Jhead.call(\"-ci\", name.shellescape, @match, @pattern)\n end", "def comment_line?(line_source)\n /^\\s*#/.match?(line_source)\n end", "def current_comment\n @current_comment ||= comments.find { |comment| location.included?(comment.location) }\n end", "def spaceFirstComment(theLines)\n\n\ttheLines.each_with_index do |theLine, theIndex|\n\n\t\t# Two blank lines between brace and leading comment\n\t\tif (theLine[:text] == \"{\" && theLine[:comment].empty?)\n\t\t\n\t\t\tnextLine = theLines[theIndex + 1];\n\n\t\t\tif (nextLine[:text] =~ /^\\s*$/ && !nextLine[:comment].empty?)\n\t\t\t\ttheLines.insert(theIndex + 1, EMPTY_LINE);\n\t\t\t\ttheLines.insert(theIndex + 1, EMPTY_LINE);\n\t\t\t\tbreak;\n\t\t\tend\n\t\tend\n\n\tend\n\nend", "def comment_level\n @comment_level || SEVERITY_LEVELS.first\n end" ]
[ "0.73834175", "0.7323518", "0.7208017", "0.71247345", "0.71201324", "0.691946", "0.6904402", "0.6860326", "0.6812443", "0.66707355", "0.6609536", "0.6508557", "0.65068537", "0.6457085", "0.6401644", "0.636546", "0.6330941", "0.6329964", "0.63258374", "0.6316141", "0.62840015", "0.6273554", "0.62659895", "0.624572", "0.6237197", "0.6157554", "0.610869", "0.60908234", "0.6071901", "0.60599196", "0.60578144", "0.60180867", "0.6000242", "0.5964297", "0.59538734", "0.59490615", "0.59233034", "0.59233034", "0.59233034", "0.59209776", "0.5916591", "0.58941054", "0.5858808", "0.5833744", "0.58300555", "0.58245987", "0.58034897", "0.57658917", "0.5756072", "0.5753755", "0.57330406", "0.5698286", "0.56662524", "0.5654936", "0.5654936", "0.5645372", "0.56322706", "0.5631987", "0.5613073", "0.5613073", "0.5613073", "0.5613073", "0.5612607", "0.5612607", "0.5605145", "0.55951715", "0.5593042", "0.55864704", "0.5576655", "0.5574637", "0.5574637", "0.55098367", "0.5502539", "0.5499977", "0.5489915", "0.54782397", "0.54672277", "0.5462077", "0.545983", "0.54510796", "0.54510796", "0.5449756", "0.54416627", "0.5437014", "0.54317915", "0.54205835", "0.54147035", "0.5400887", "0.53846073", "0.5366547", "0.534932", "0.53485435", "0.5334592", "0.5332414", "0.5332041", "0.533025", "0.53255886", "0.5312282", "0.5312011", "0.52872103" ]
0.6439953
14
Tests whether the next literal is a MultiLineComment. If it is, returns an ECMA262::MultiLineComment object and advances the lexical parser position; otherwise returns nil and the position is not changed.
def multi_line_comment
  # /*
  if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2a
    @pos += 2
    pos0 = @pos
    # */
    while (code = @codes[@pos]) != 0x2a or @codes[@pos + 1] != 0x2f
      raise ParseError.new("no `*/' at end of comment", self) if code.nil?
      @pos += 1
    end
    @pos += 2
    return ECMA262::MultiLineComment.new(@codes[pos0...(@pos - 2)].pack("U*"))
  else
    nil
  end
end
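For illustration, a minimal self-contained harness exercises the advance / no-advance contract described by the query. The MiniLexer wrapper, the ECMA262::MultiLineComment struct stub, and the ParseError stub are assumptions introduced only for this sketch; the real lexer defines its own versions of all three.

module ECMA262
  MultiLineComment = Struct.new(:text)  # stub node; the real class lives in the lexer's AST
end

class ParseError < StandardError
  def initialize(msg, _lexer); super(msg); end  # stub; the real error carries lexer state
end

class MiniLexer
  attr_reader :pos

  def initialize(source)
    @codes = source.unpack("U*")  # codepoints, matching @codes in the method above
    @pos = 0
  end

  # Same body as the document method above, embedded so the sketch runs standalone.
  def multi_line_comment
    if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2a
      @pos += 2
      pos0 = @pos
      while (code = @codes[@pos]) != 0x2a or @codes[@pos + 1] != 0x2f
        raise ParseError.new("no `*/' at end of comment", self) if code.nil?
        @pos += 1
      end
      @pos += 2
      ECMA262::MultiLineComment.new(@codes[pos0...(@pos - 2)].pack("U*"))
    else
      nil
    end
  end
end

lexer = MiniLexer.new("/* head */var x;")
p lexer.multi_line_comment  # => #<struct ECMA262::MultiLineComment text=" head ">
p lexer.pos                 # => 10, i.e. scanned past the closing "*/"
p lexer.multi_line_comment  # => nil, and lexer.pos stays 10 (next char is not "/")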
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def single_line_comment\n # //\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2f\n @pos += 2\n pos0 = @pos\n while (code = @codes[@pos]) and !line_terminator?(code)\n @pos += 1\n end\n return ECMA262::SingleLineComment.new(@codes[pos0...@pos].pack(\"U*\"))\n else\n nil\n end\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"#\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_eol)\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_eol)\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eol)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n break unless _tmp\n _tmp = apply(:_eol)\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def read_line_comment(token)\n token.kind = :line_comment\n read_next() while (current = peek_next()) && current != ?\\n\n token.value = @source[token.from .. @marker.source_index] if !@skip_comments\n end", "def _comment\n\n _save = self.pos\n while true # choice\n _tmp = scan(/\\A(?-mix:--.*?$)/)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_multi_comment)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _Comment\n\n _save = self.pos\n while true # sequence\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"//\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_Nl)\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_Nl)\n unless _tmp\n self.pos = _save\n break\n end\n while true\n _tmp = apply(:_EmptyLine)\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_Comment unless _tmp\n return _tmp\n end", "def comment\n multi_line_comment || single_line_comment\n end", "def ml_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 40)\n\n type = ML_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 478:4: '/*' ( . )* '*/'\n match(\"/*\")\n # --> action\n if @input.peek(1) == ?* then type = DOC_COMMENT else channel = HIDDEN end \n # <-- action\n # at line 478:88: ( . )*\n loop do #loop 4\n alt_4 = 2\n look_4_0 = @input.peek(1)\n\n if (look_4_0 == ?*) \n look_4_1 = @input.peek(2)\n\n if (look_4_1 == ?/) \n alt_4 = 2\n elsif (look_4_1.between?(0x0000, ?.) 
|| look_4_1.between?(?0, 0xFFFF)) \n alt_4 = 1\n\n end\n elsif (look_4_0.between?(0x0000, ?)) || look_4_0.between?(?+, 0xFFFF)) \n alt_4 = 1\n\n end\n case alt_4\n when 1\n # at line 478:88: .\n match_any\n\n else\n break #loop 4\n end\n end\n match(\"*/\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 40)\n\n end", "def parse_comment\n return false unless @lexer.get and @lexer.get.type == :comment_start\n @lexer.next!\n\n buf = ''\n while token = @lexer.get\n break if token.type == :comment_end\n buf << token.value\n @lexer.next!\n end\n\n found :comment, buf\n @lexer.next!\n true\n end", "def multiline_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 50 )\n\n\n\n type = MULTILINE_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 229:21: '/*' ( . )* '*/'\n match( \"/*\" )\n\n # at line 229:26: ( . )*\n while true # decision 7\n alt_7 = 2\n look_7_0 = @input.peek( 1 )\n\n if ( look_7_0 == 0x2a )\n look_7_1 = @input.peek( 2 )\n\n if ( look_7_1 == 0x2f )\n alt_7 = 2\n elsif ( look_7_1.between?( 0x0, 0x2e ) || look_7_1.between?( 0x30, 0xffff ) )\n alt_7 = 1\n\n end\n elsif ( look_7_0.between?( 0x0, 0x29 ) || look_7_0.between?( 0x2b, 0xffff ) )\n alt_7 = 1\n\n end\n case alt_7\n when 1\n # at line 229:26: .\n match_any\n\n else\n break # out of loop for decision 7\n end\n end # loop for decision 7\n\n\n match( \"*/\" )\n\n\n # --> action\n channel = HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 50 )\n\n\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"(\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # choice\n _tmp = apply(:_ctext)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_quoted_pair)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save2\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\")\")\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _eof_comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eof)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_eof_comment unless _tmp\n return _tmp\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"#\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_end_hyphen_of_hyphen_line)\n _tmp = _tmp ? 
nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = apply(:_utf8)\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def comment?\n @kind == :line_comment || @kind == :block_comment\n end", "def read_block_comment(token)\n token.kind = :block_comment\n\n read_next()\n while (current = read_next())\n if current == ?* && peek_next() == ?/\n current = read_next()\n break\n end\n end\n\n raise_error(:unterminated_block_comment, \"Unterminated block comment\") if !current\n token.value = @source[token.from .. @marker.source_index] if !@skip_comments\n end", "def lex\n @index += 1\n while lexer.tokens[@index] === :COMMENT\n @index += 1\n end\n lexer.tokens[@index] or unexpected_error(:EOF)\n end", "def multiline_comment(prefix, postfix)\n lang_eval do\n @multiline_comment_prefix = prefix\n @multiline_comment_postfix = postfix\n end\n nil\n end", "def parse_comment\n s0 = @scanner.pos\n if match_str('*') == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n s2 = parse_nonls\n if parse_nl == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n @reported_pos = s0\n s0 = s2.join\n end\n end\n if s0 == :failed\n s0 = @scanner.pos\n s1 = match_str('&')\n if s1 == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n s2 = parse_nonls\n if parse_nl == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n @reported_pos = s0\n s0 = '&' + s2.join\n end\n end\n end\n s0\n end", "def comment?\n return @assigned_paragraph_type == :comment if @assigned_paragraph_type\n return block_type.casecmp(\"COMMENT\") if begin_block? or end_block?\n return @line =~ /^[ \\t]*?#[ \\t]/\n end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def collect_first_comment\n skip_tkspace\n comment = ''.dup\n comment = RDoc::Encoding.change_encoding comment, @encoding if @encoding\n first_line = true\n first_comment_tk_kind = nil\n line_no = nil\n\n tk = get_tk\n\n while tk && (:on_comment == tk[:kind] or :on_embdoc == tk[:kind])\n comment_body = retrieve_comment_body(tk)\n if first_line and comment_body =~ /\\A#!/ then\n skip_tkspace\n tk = get_tk\n elsif first_line and comment_body =~ /\\A#\\s*-\\*-/ then\n first_line = false\n skip_tkspace\n tk = get_tk\n else\n break if first_comment_tk_kind and not first_comment_tk_kind === tk[:kind]\n first_comment_tk_kind = tk[:kind]\n\n line_no = tk[:line_no] if first_line\n first_line = false\n comment << comment_body\n tk = get_tk\n\n if :on_nl === tk then\n skip_tkspace_without_nl\n tk = get_tk\n end\n end\n end\n\n unget_tk tk\n\n new_comment comment, line_no\n end", "def process_initial_comment(tk)\n if @statement.empty? && (@comments_last_line || 0) < tk.line_no - 2\n @comments = nil\n end\n\n return unless tk.class == TkCOMMENT\n\n case tk.text\n when Parser::SourceParser::SHEBANG_LINE\n if !@last_ns_tk && !@encoding_line\n @shebang_line = tk.text\n return\n end\n when Parser::SourceParser::ENCODING_LINE\n if (@last_ns_tk.class == TkCOMMENT && @last_ns_tk.text == @shebang_line) ||\n !@last_ns_tk\n @encoding_line = tk.text\n return\n end\n end\n\n return if [email protected]? 
&& @comments\n return if @first_line && tk.line_no > @first_line\n\n if @comments_last_line && @comments_last_line < tk.line_no - 1\n if @comments && @statement.empty?\n @tokens.unshift(tk)\n return @done = true\n end\n @comments = nil\n end\n @comments_line = tk.line_no unless @comments\n\n # Remove the \"#\" and up to 1 space before the text\n # Since, of course, the convention is to have \"# text\"\n # and not \"#text\", which I deem ugly (you heard it here first)\n @comments ||= []\n if tk.text.start_with?('=begin')\n lines = tk.text.count(\"\\n\")\n @comments += tk.text.gsub(/\\A=begin.*\\r?\\n|\\r?\\n=end.*\\r?\\n?\\Z/, '').split(/\\r?\\n/)\n @comments_last_line = tk.line_no + lines\n else\n @comments << tk.text.gsub(/^(#+)\\s{0,1}/, '')\n @comments_hash_flag = $1 == '##' if @comments_hash_flag.nil?\n @comments_last_line = tk.line_no\n end\n @comments.pop if @comments.size == 1 && @comments.first =~ /^\\s*$/\n true\n end", "def detect_comments\n if @input =~ %r{^\\s*[/]{2}}\n @mode = :comment\n @expression = ''\n end\n end", "def comment_token\n return unless match = @chunk.match(COMMENT)\n # _, comment = *match\n # token(:COMMENT, comment, 0, comment.size)\n match[0].size\n end", "def consume_comments\n if @s.peek(2) == '/*'\n @s.consume\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n # Parse error.\n text = @s.consume_rest\n end\n\n return create_token(:comment, :value => text)\n end\n\n nil\n end", "def multiLineComment(comment, header = \"\")\r\n bar = \"\"\r\n hdrBar = \"\"\r\n\r\n # 3: 3 misc chars (/* )\r\n width = (@maxWidth - 3)\r\n\r\n width.times do\r\n bar += \"*\"\r\n end\r\n\r\n if (header.length > 0) # Generate a formatted header if it exists.\r\n hdrWidth = (@maxWidth - 6 - header.length) / 2\r\n\r\n hdrWidth.times do\r\n hdrBar += \" \"\r\n end # times\r\n end # if header\r\n\r\n output = <<EOF\r\n\r\n\r\n\r\n\r\n/* #{bar}\r\n#{hdrBar}-- #{header} --#{hdrBar}\r\n\r\n#{comment}\r\n\r\n#{bar} */\r\n\r\n\r\n\r\n\r\nEOF\r\n\r\n output\r\n\r\n end", "def comment_token\n if md=HERE_COMMENT.match(@chunk)\n input, comment, body = md.to_a\n token :HereComment, body, :newLine => true\n token :Terminator, \"\\n\"\n @line += count(comment, \"\\n\")\n return comment.length\n elsif md=COMMENT.match(@chunk)\n input, comment, body = md.to_a\n token :Comment, body\n return comment.length\n end\n\n return nil\n end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def wrap_final_comment\n current = wrap_rwhitespace(whitespaces: /\\A[ \\t\\r\\f]+/)\n if @source_buffer.slice(current.end_pos) != '#'\n # No comment, do nothing\n return self\n end\n comment = @source_buffer.slice(current.end_pos..-1)[/\\A[^\\n]+/] || ''\n current.adjust(end_pos: comment.size)\n end", "def lex_comment(input)\n case input\n when /\\A\\s*\\/\\// # single line comment //\n if /\\A\\s*(\\/\\/.*?\\n)/.match(input)\n return :comment, $1, $'\n else\n return :open, '//', input\n end\n when /\\A\\s*\\/\\*/m # multi-line comment /* */\n if /\\A\\s*(\\/\\*.*?\\*\\/)/m.match(input)\n return :comment, $1, $'\n else\n return :open, '/*', input\n end\n when /\\A\\s*@\"/ # objective C string\n if :objective_c == @language\n token, value, rest = lex_string($')\n if :open == token\n return :open, '@\"', input\n elsif :error == token\n return :error, nil, input\n else\n return :string, '@\"' + value, rest\n end\n else\n return :error, nil, input\n end\n when /\\A\\s*\"/ # double quoted string \" \"\n token, value, 
rest = lex_string($')\n if :open == token\n return :open, '\"', input\n elsif :error == token\n return :error, nil, input\n else\n return :string, '\"' + value, rest\n end\n when /\\A\\s*'/ # char literal ' '\n token, value, rest = lex_char($')\n if :open == token\n return :open, \"'\", input\n elsif :error == token\n return :error, nil, input\n else\n return :char, \"'\" + value, rest\n end\n when /\\A\\s*(@#{@regex_identifier})/ # objective c directive\n value, rest = $1, $'\n if @keywords.include?(value)\n return :keyword, value, rest\n else\n return :error, nil, input\n end\n when /\\A\\s*(#{@regex_identifier})/\n value, rest = $1, $'\n if @keywords.include?(value)\n return :keyword, value, rest\n elsif @unique_tokens.has_key?(value)\n return @unique_tokens[value], value, rest\n else\n return :identifier, value, rest\n end\n when /\\A\\s*(#{@regex_float})/\n return :float, $1, $'\n when /\\A\\s*(#{@regex_hex_float})/\n return :float, $1, $'\n when /\\A\\s*(#{@regex_integer})/\n return :integer, $1, $'\n when /\\A\\s*(\\S.*)\\z/m\n val, rest = lex_punctuator($1)\n if val\n return :punctuator, val, rest\n else\n return :error, nil, input\n end\n else\n return :end\n end\n end", "def _HtmlComment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"<!--\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = match_string(\"-->\")\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"-->\")\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_HtmlComment unless _tmp\n return _tmp\n end", "def lex_comment line\n # do nothing\n end", "def parse_with_comments(source_buffer); end", "def parse_with_comments(source_buffer); end", "def parse_with_comments(source_buffer); end", "def comment\n comment = buffer.options.comment_line.to_s\n indent = nil\n lines = []\n\n each_line do |line, fc, tc|\n line_fc = \"#{line}.#{fc}\"\n line_tc = \"#{line}.#{tc}\"\n\n next if buffer.at_end == line_tc\n\n lines << line\n\n next if indent == 0 # can't get lower\n\n line = buffer.get(\"#{line}.#{fc}\", \"#{line}.#{tc}\")\n\n next unless start = line =~ /\\S/\n\n indent ||= start\n indent = start if start < indent\n end\n\n indent ||= 0\n\n buffer.undo_record do |record|\n lines.each do |line|\n record.insert(\"#{line}.#{indent}\", comment)\n end\n end\n end", "def consume_comment(input)\n while not input.eof? do\n case input.look_ahead\n when \"\\\\\" : \n # In comments, only escaped backslashes and line endings matter\n if [\"\\n\", \"\\\\\"].include? 
input.look_ahead(1)\n input.consume\n end\n when \"\\n\" : input.consume; break \n end\n input.consume\n end\n end", "def is_comment(line)\n result = false\n # If source supports single line comments\n if comment_symbols[:single_line]\n result ||= line =~ /^\\s*#{Regexp.escape(comment_symbols[:single_line])}/\n end\n\n # If source supports multi-line comments\n if comment_symbols[:multiline]\n result ||= line =~ /^\\s*#{Regexp.escape(comment_symbols[:multiline][:begin])}/\n end\n result\n end", "def multiline?(node); end", "def comment_line?(line_source); end", "def comment_line?(line_source); end", "def count_single_line_comments(buff, comment_regexp)\n a = buff.select { |l|\n not l.match(comment_regexp).nil?\n }.size\n a\n end", "def count_single_line_comments(buff, comment_regexp)\n a = buff.select { |l|\n not l.match(comment_regexp).nil?\n }.size\n a\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 15)\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 148:3: ( ( '#' | '//' ) (~ '\\\\n' )* | '/*' ( . )* '*/' )\n alt_10 = 2\n look_10_0 = @input.peek(1)\n\n if (look_10_0 == ?#) \n alt_10 = 1\n elsif (look_10_0 == ?/) \n look_10_2 = @input.peek(2)\n\n if (look_10_2 == ?/) \n alt_10 = 1\n elsif (look_10_2 == ?*) \n alt_10 = 2\n else\n nvae = NoViableAlternative(\"\", 10, 2)\n raise nvae\n end\n else\n nvae = NoViableAlternative(\"\", 10, 0)\n raise nvae\n end\n case alt_10\n when 1\n # at line 148:5: ( '#' | '//' ) (~ '\\\\n' )*\n # at line 148:5: ( '#' | '//' )\n alt_7 = 2\n look_7_0 = @input.peek(1)\n\n if (look_7_0 == ?#) \n alt_7 = 1\n elsif (look_7_0 == ?/) \n alt_7 = 2\n else\n nvae = NoViableAlternative(\"\", 7, 0)\n raise nvae\n end\n case alt_7\n when 1\n # at line 148:7: '#'\n match(?#)\n\n when 2\n # at line 148:13: '//'\n match(\"//\")\n\n end\n # at line 148:20: (~ '\\\\n' )*\n while true # decision 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0.between?(0x0000, ?\\t) || look_8_0.between?(0x000B, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 148:20: ~ '\\\\n'\n if @input.peek(1).between?(0x0000, ?\\t) || @input.peek(1).between?(0x000B, 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 8\n end\n end # loop for decision 8\n\n when 2\n # at line 149:5: '/*' ( . )* '*/'\n match(\"/*\")\n # at line 149:10: ( . )*\n while true # decision 9\n alt_9 = 2\n look_9_0 = @input.peek(1)\n\n if (look_9_0 == ?*) \n look_9_1 = @input.peek(2)\n\n if (look_9_1 == ?/) \n alt_9 = 2\n elsif (look_9_1.between?(0x0000, ?.) || look_9_1.between?(?0, 0xFFFF)) \n alt_9 = 1\n\n end\n elsif (look_9_0.between?(0x0000, ?)) || look_9_0.between?(?+, 0xFFFF)) \n alt_9 = 1\n\n end\n case alt_9\n when 1\n # at line 149:10: .\n match_any\n\n else\n break # out of loop for decision 9\n end\n end # loop for decision 9\n match(\"*/\")\n\n end\n \n @state.type = type\n @state.channel = channel\n # --> action\n skip \n # <-- action\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 15)\n\n end", "def line_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 78 )\n\n\n\n type = LINE_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 608:8: '#' (~ ( '\\\\n' | '\\\\r' ) )* ( '\\\\r' )? 
'\\\\n'\n match( 0x23 )\n # at line 608:12: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 25\n alt_25 = 2\n look_25_0 = @input.peek( 1 )\n\n if ( look_25_0.between?( 0x0, 0x9 ) || look_25_0.between?( 0xb, 0xc ) || look_25_0.between?( 0xe, 0xffff ) )\n alt_25 = 1\n\n end\n case alt_25\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xffff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 25\n end\n end # loop for decision 25\n\n # at line 608:26: ( '\\\\r' )?\n alt_26 = 2\n look_26_0 = @input.peek( 1 )\n\n if ( look_26_0 == 0xd )\n alt_26 = 1\n end\n case alt_26\n when 1\n # at line 608:26: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n # --> action\n channel=HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 78 )\n\n\n end", "def preceding_comment?(node1, node2); end", "def parse_comments\n @data[4][0]\n end", "def parse_comment container, tk, comment\n return parse_comment_tomdoc container, tk, comment if @markup == 'tomdoc'\n column = tk[:char_no]\n line_no = comment.line.nil? ? tk[:line_no] : comment.line\n\n comment.text = comment.text.sub(/(^# +:?)(singleton-)(method:)/, '\\1\\3')\n singleton = !!$~\n\n co =\n if (comment.text = comment.text.sub(/^# +:?method: *(\\S*).*?\\n/i, '')) && !!$~ then\n line_no += $`.count(\"\\n\")\n parse_comment_ghost container, comment.text, $1, column, line_no, comment\n elsif (comment.text = comment.text.sub(/# +:?(attr(_reader|_writer|_accessor)?): *(\\S*).*?\\n/i, '')) && !!$~ then\n parse_comment_attr container, $1, $3, comment\n end\n\n if co then\n co.singleton = singleton\n co.line = line_no\n end\n\n true\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 13 )\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 63:5: '#' (~ ( '\\\\r' | '\\\\n' ) )*\n match( 0x23 )\n # at line 63:9: (~ ( '\\\\r' | '\\\\n' ) )*\n while true # decision 22\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x0, 0x9 ) || look_22_0.between?( 0xb, 0xc ) || look_22_0.between?( 0xe, 0xffff ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line 63:11: ~ ( '\\\\r' | '\\\\n' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 22\n end\n end # loop for decision 22\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 13 )\n\n end", "def parse_with_comments(source_buffer)\n @lexer.comments = []\n\n [ parse(source_buffer), @lexer.comments ]\n ensure\n @lexer.comments = nil\n end", "def sl_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 39)\n\n type = SL_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 470:5: '//' ( ' $ANTLR ' SRC | (~ ( '\\\\r' | '\\\\n' ) )* ) ( '\\\\r' )? 
'\\\\n'\n match(\"//\")\n # at line 471:5: ( ' $ANTLR ' SRC | (~ ( '\\\\r' | '\\\\n' ) )* )\n alt_2 = 2\n alt_2 = @dfa2.predict(@input)\n case alt_2\n when 1\n # at line 471:7: ' $ANTLR ' SRC\n match(\" $ANTLR \")\n src!\n\n when 2\n # at line 472:6: (~ ( '\\\\r' | '\\\\n' ) )*\n # at line 472:6: (~ ( '\\\\r' | '\\\\n' ) )*\n loop do #loop 1\n alt_1 = 2\n look_1_0 = @input.peek(1)\n\n if (look_1_0.between?(0x0000, ?\\t) || look_1_0.between?(0x000B, ?\\f) || look_1_0.between?(0x000E, 0xFFFF)) \n alt_1 = 1\n\n end\n case alt_1\n when 1\n # at line 472:6: ~ ( '\\\\r' | '\\\\n' )\n if @input.peek(1).between?(0x0000, ?\\t) || @input.peek(1).between?(0x000B, ?\\f) || @input.peek(1).between?(0x000E, 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break #loop 1\n end\n end\n\n end\n # at line 474:3: ( '\\\\r' )?\n alt_3 = 2\n look_3_0 = @input.peek(1)\n\n if (look_3_0 == ?\\r) \n alt_3 = 1\n end\n case alt_3\n when 1\n # at line 474:3: '\\\\r'\n match(?\\r)\n\n end\n match(?\\n)\n # --> action\n channel=HIDDEN;\n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 39)\n\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? || @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? 
|| !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def process_block_comment!(index, tokens, ranges)\n start_index = index\n line_num = line_for_offset(tokens[index][1])\n while index < tokens.length - 2\n break unless tokens[index + 1][0] == :TOKEN_COMMENT\n next_line = line_for_offset(tokens[index + 1][1])\n # Tokens must be on contiguous lines\n break unless next_line == line_num + 1\n # Must not be a region comment\n comment = extract_text(tokens[index + 1][1])\n break if start_region?(comment) || end_region?(comment)\n # It's a block comment\n line_num = next_line\n index += 1\n end\n\n return index if start_index == index\n\n add_range!(create_range_span_tokens(tokens[start_index][1], tokens[index][1], REGION_COMMENT), ranges)\n index\n end", "def is_multiline?\n @code =~ /\\n/\n end", "def line_comments_option; end", "def mynext()\n c = get\n \n if (c == \"/\")\n if (peek == \"/\")\n while(true)\n c = get\n if (c[0] <= ?\\n)\n return c\n end\n end\n end\n if (peek == \"*\")\n get\n while (true)\n case get\n when \"*\"\n if (peek == \"/\")\n get\n return \" \"\n end\n when EOF\n raise \"Unterminated comment\"\n end\n end\n end\n end\n return c\n end", "def parse_line_break; end", "def parse_statements(container, single = NORMAL, current_method = nil,\n comment = new_comment(''))\n raise 'no' unless RDoc::Comment === comment\n comment = RDoc::Encoding.change_encoding comment, @encoding if @encoding\n\n nest = 1\n save_visibility = container.visibility\n\n non_comment_seen = true\n\n while tk = get_tk do\n keep_comment = false\n try_parse_comment = false\n\n non_comment_seen = true unless (:on_comment == tk[:kind] or :on_embdoc == tk[:kind])\n\n case tk[:kind]\n when :on_nl, :on_ignored_nl, :on_comment, :on_embdoc then\n if :on_nl == tk[:kind] or :on_ignored_nl == tk[:kind]\n skip_tkspace\n tk = get_tk\n else\n past_tokens = @read.size > 1 ? @read[0..-2] : []\n nl_position = 0\n past_tokens.reverse.each_with_index do |read_tk, i|\n if read_tk =~ /^\\n$/ then\n nl_position = (past_tokens.size - 1) - i\n break\n elsif read_tk =~ /^#.*\\n$/ then\n nl_position = ((past_tokens.size - 1) - i) + 1\n break\n end\n end\n comment_only_line = past_tokens[nl_position..-1].all?{ |c| c =~ /^\\s+$/ }\n unless comment_only_line then\n tk = get_tk\n end\n end\n\n if tk and (:on_comment == tk[:kind] or :on_embdoc == tk[:kind]) then\n if non_comment_seen then\n # Look for RDoc in a comment about to be thrown away\n non_comment_seen = parse_comment container, tk, comment unless\n comment.empty?\n\n comment = ''\n comment = RDoc::Encoding.change_encoding comment, @encoding if @encoding\n end\n\n line_no = nil\n while tk and (:on_comment == tk[:kind] or :on_embdoc == tk[:kind]) do\n comment_body = retrieve_comment_body(tk)\n line_no = tk[:line_no] if comment.empty?\n comment += comment_body\n comment << \"\\n\" unless comment_body =~ /\\n\\z/\n\n if comment_body.size > 1 && comment_body =~ /\\n\\z/ then\n skip_tkspace_without_nl # leading spaces\n end\n tk = get_tk\n end\n\n comment = new_comment comment, line_no\n\n unless comment.empty? 
then\n look_for_directives_in container, comment\n\n if container.done_documenting then\n throw :eof if RDoc::TopLevel === container\n container.ongoing_visibility = save_visibility\n end\n end\n\n keep_comment = true\n else\n non_comment_seen = true\n end\n\n unget_tk tk\n keep_comment = true\n container.current_line_visibility = nil\n\n when :on_kw then\n case tk[:text]\n when 'class' then\n parse_class container, single, tk, comment\n\n when 'module' then\n parse_module container, single, tk, comment\n\n when 'def' then\n parse_method container, single, tk, comment\n\n when 'alias' then\n parse_alias container, single, tk, comment unless current_method\n\n when 'yield' then\n if current_method.nil? then\n warn \"Warning: yield outside of method\" if container.document_self\n else\n parse_yield container, single, tk, current_method\n end\n\n when 'until', 'while' then\n if (tk[:state] & RDoc::Parser::RipperStateLex::EXPR_LABEL) == 0\n nest += 1\n skip_optional_do_after_expression\n end\n\n # Until and While can have a 'do', which shouldn't increase the nesting.\n # We can't solve the general case, but we can handle most occurrences by\n # ignoring a do at the end of a line.\n\n # 'for' is trickier\n when 'for' then\n nest += 1\n skip_for_variable\n skip_optional_do_after_expression\n\n when 'case', 'do', 'if', 'unless', 'begin' then\n if (tk[:state] & RDoc::Parser::RipperStateLex::EXPR_LABEL) == 0\n nest += 1\n end\n\n when 'super' then\n current_method.calls_super = true if current_method\n\n when 'rescue' then\n parse_rescue\n\n when 'end' then\n nest -= 1\n if nest == 0 then\n container.ongoing_visibility = save_visibility\n\n parse_comment container, tk, comment unless comment.empty?\n\n return\n end\n end\n\n when :on_const then\n unless parse_constant container, tk, comment, current_method then\n try_parse_comment = true\n end\n\n when :on_ident then\n if nest == 1 and current_method.nil? then\n keep_comment = parse_identifier container, single, tk, comment\n end\n\n case tk[:text]\n when \"require\" then\n parse_require container, comment\n when \"include\" then\n parse_extend_or_include RDoc::Include, container, comment\n when \"extend\" then\n parse_extend_or_include RDoc::Extend, container, comment\n when \"included\" then\n parse_included_with_activesupport_concern container, comment\n end\n\n else\n try_parse_comment = nest == 1\n end\n\n if try_parse_comment then\n non_comment_seen = parse_comment container, tk, comment unless\n comment.empty?\n\n keep_comment = false\n end\n\n unless keep_comment then\n comment = new_comment ''\n comment = RDoc::Encoding.change_encoding comment, @encoding if @encoding\n container.params = nil\n container.block_params = nil\n end\n\n consume_trailing_spaces\n end\n\n container.params = nil\n container.block_params = nil\n end", "def next_statement\n statement, block, comments = TokenList.new, nil, nil\n stmt_number, level = 0, 0\n new_statement, open_block = true, false\n last_tk, last_ns_tk, before_last_tk = nil, nil, nil\n open_parens = 0\n\n while tk = @tokens.shift\n #p tk.class\n # !!!!!!!!!!!!!!!!!!!! REMOVED TkfLPAREN, TkfLBRACK\n open_parens += 1 if [TkLPAREN, TkLBRACK].include? 
tk.class\n open_parens -= 1 if [TkRPAREN, TkRBRACK].include?(tk.class) \n \n #if open_parens < 0 || level < 0\n # STDERR.puts block.to_s + \" TOKEN #{tk.inspect}\"\n # exit\n #end\n\n # Get the initial comments\n if statement.empty?\n # Two new-lines in a row will destroy any comment blocks\n if [TkCOMMENT].include?(tk.class) && last_tk.class == TkNL && \n (before_last_tk && (before_last_tk.class == TkNL || before_last_tk.class == TkSPACE))\n comments = nil\n elsif tk.class == TkCOMMENT\n # Remove the \"#\" and up to 1 space before the text\n # Since, of course, the convention is to have \"# text\"\n # and not \"#text\", which I deem ugly (you heard it here first)\n comments ||= []\n comments << tk.text.gsub(/^#+\\s{0,1}/, '') \n comments.pop if comments.size == 1 && comments.first =~ /^\\s*$/\n end\n end\n \n # Ignore any initial comments or whitespace\n unless statement.empty? && [TkSPACE, TkNL, TkCOMMENT].include?(tk.class)\n # Decrease if end or '}' is seen\n level -= 1 if [TkEND, TkRBRACE].include?(tk.class)\n \n # If the level is greater than 0, add the code to the block text\n # otherwise it's part of the statement text\n if stmt_number > 0\n #puts \"Block of #{statement}\"\n #puts \"#{stmt_number} #{tk.line_no} #{level} #{open_parens} #{tk.class.class_name} \\t#{tk.text.inspect} #{tk.lex_state} #{open_block.inspect}\" \n block ||= TokenList.new\n block << tk\n elsif stmt_number == 0 && tk.class != TkNL && tk.class != TkSEMICOLON && tk.class != TkCOMMENT\n statement << tk \n end\n\n #puts \"#{tk.line_no} #{level} #{open_parens} #{tk.class.class_name} \\t#{tk.text.inspect} #{tk.lex_state} #{open_block.inspect}\" \n\n # Increase level if we have a 'do' or block opening\n if tk.class == TkLBRACE #|| tk.class == TkfLBRACE\n level += 1 \n elsif [TkDO, TkBEGIN].include?(tk.class) \n #p \"#{tk.line_no} #{level} #{tk} \\t#{tk.text} #{tk.lex_state}\" \n level += 1 \n open_block = false # Cancel our wish to open a block for the if, we're doing it now\n end\n\n # Vouch to open a block when this statement would otherwise end\n open_block = [level, tk.class] if (new_statement || \n (last_tk && last_tk.lex_state == EXPR_BEG)) && \n OPEN_BLOCK_TOKENS.include?(tk.class)\n\n # Check if this token creates a new statement or not\n #puts \"#{open_parens} open brackets for: #{statement.to_s}\"\n if open_parens == 0 && ((last_tk && [TkSEMICOLON, TkNL, TkEND_OF_SCRIPT].include?(tk.class)) ||\n (open_block && open_block.last == TkDEF && tk.class == TkRPAREN))\n \n # Make sure we don't have any running expressions\n # This includes things like\n #\n # class <\n # Foo\n # \n # if a ||\n # b\n if (last_tk && [EXPR_END, EXPR_ARG].include?(last_tk.lex_state)) || \n (open_block && [TkNL, TkSEMICOLON].include?(tk.class) && last_ns_tk.class != open_block.last)\n stmt_number += 1\n new_statement = true\n #p \"NEW STATEMENT #{block.to_s}\"\n\n # The statement started with a if/while/begin, so we must go to the next level now\n if open_block && open_block.first == level\n if tk.class == TkNL && block.nil?\n block = TokenList.new\n block << tk\n end\n\n open_block = false\n level += 1\n end\n end\n elsif tk.class != TkSPACE\n new_statement = false \n end\n\n # Else keyword is kind of weird\n if tk.is_a? 
TkELSE\n new_statement = true\n stmt_number += 1\n open_block = false\n end\n\n # We're done if we've ended a statement and we're at level 0\n break if new_statement && level == 0\n \n #raise \"Unexpected end\" if level < 0\n end\n \n #break if new_statement && level == 0\n\n before_last_tk = last_tk\n last_tk = tk # Save last token\n last_ns_tk = tk unless [TkSPACE, TkNL, TkEND_OF_SCRIPT].include? tk.class\n end\n\n # Return the code block with starting token and initial comments\n # If there is no code in the block, return nil\n comments = comments.compact if comments\n statement.empty? ? nil : Statement.new(statement, block, comments)\n end", "def is_multiline?(text)\n text && text.length > 1 && text[-1] == MULTILINE_CHAR_VALUE && text[-2] == ?\\s\n end", "def parse_with_comments(source); end", "def extract_comment comment\n case comment\n when Array then\n comment.map do |c|\n extract_comment c\n end\n when nil\n RDoc::Comment.new ''\n when RDoc::Comment then\n if comment.text =~ /^#[ \\t]*:section:.*\\n/ then\n start = $`\n rest = $'\n\n comment.text = if start.empty? then\n rest\n else\n rest.sub(/#{start.chomp}\\Z/, '')\n end\n end\n\n comment\n when RDoc::Markup::Document then\n comment\n else\n raise TypeError, \"unknown comment #{comment.inspect}\"\n end\n end", "def is_comment?(line)\n true if line =~ /^\\#.*$/\n end", "def is_multiline?(line) # ' '[0] == 32\n line && line.length > 1 && line[-1] == MULTILINE_CHAR_VALUE && line[-2] == 32\n end", "def lex(input)\n loop do\n token, value, rest = lex_comment(input)\n if :comment == token\n raise \"infinite loop on input: #{input}\" if input == rest\n input = rest\n else\n return token, value, rest\n end\n end\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 35 )\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 350:9: '#' (~ ( '\\\\n' | '\\\\r' ) )*\n match( 0x23 )\n # at line 350:13: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 13\n alt_13 = 2\n look_13_0 = @input.peek( 1 )\n\n if ( look_13_0.between?( 0x0, 0x9 ) || look_13_0.between?( 0xb, 0xc ) || look_13_0.between?( 0xe, 0xffff ) )\n alt_13 = 1\n\n end\n case alt_13\n when 1\n # at line 350:13: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 13\n end\n end # loop for decision 13\n # --> action\n channel=HIDDEN;\n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 35 )\n\n end", "def parse_comment(raw)\n c = nil\n if raw =~ /\\A\\((.+?)\\)(.+)\\z/\n c, raw = [$2, $1]\n end\n if raw =~ /\\A(.+)\\((.+?)\\)\\z/\n raw, c = [$1, $2]\n end\n [raw, c]\n end", "def extract_last_comment(lines); end", "def next_statement\n @state = :first_statement\n @statement_stack = []\n @level = 0\n @block_num = 0\n @done = false\n @current_block = nil\n @comments_line = nil\n @comments_hash_flag = nil\n @statement = TokenList.new\n @block = nil\n @comments = nil\n @last_tk = nil\n @last_ns_tk = nil\n @before_last_tk = nil\n @before_last_ns_tk = nil\n @first_line = nil\n\n until @done\n tk = @tokens.shift\n break if tk.nil?\n process_token(tk)\n\n @before_last_tk = @last_tk\n @last_tk = tk # Save last token\n unless [TkSPACE, TkNL, 
TkEND_OF_SCRIPT].include? tk.class\n @before_last_ns_tk = @last_ns_tk\n @last_ns_tk = tk\n end\n end\n\n # Return the code block with starting token and initial comments\n # If there is no code in the block, return nil\n @comments = @comments.compact if @comments\n if @block || [email protected]?\n sanitize_statement_end\n sanitize_block\n @statement.pop if [TkNL, TkSPACE, TkSEMICOLON].include?(@statement.last.class)\n stmt = Statement.new(@statement, @block, @comments)\n if @comments && @comments_line\n stmt.comments_range = (@comments_line..(@comments_line + @comments.size - 1))\n stmt.comments_hash_flag = @comments_hash_flag\n end\n stmt\n elsif @comments\n @statement << TkCOMMENT.new(@comments_line, 0)\n @statement.first.set_text(\"# \" + @comments.join(\"\\n# \"))\n Statement.new(@statement, nil, @comments)\n end\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 49 )\n\n\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 228:11: '//' ( . )* ( '\\\\n' | '\\\\r' )\n match( \"//\" )\n\n # at line 228:16: ( . )*\n while true # decision 6\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0 == 0xa || look_6_0 == 0xd )\n alt_6 = 2\n elsif ( look_6_0.between?( 0x0, 0x9 ) || look_6_0.between?( 0xb, 0xc ) || look_6_0.between?( 0xe, 0xffff ) )\n alt_6 = 1\n\n end\n case alt_6\n when 1\n # at line 228:16: .\n match_any\n\n else\n break # out of loop for decision 6\n end\n end # loop for decision 6\n\n if @input.peek(1) == 0xa || @input.peek(1) == 0xd\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n # --> action\n channel = HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 49 )\n\n\n end", "def is_comment?(line)\n line =~ /^\\s*#/\n end", "def allowed_multiline_argument?(node); end", "def existing_comment_option\n (options[:new_comments_only] && prev_line_comment) || (options[:inline] && original_line_has_comment?)\n end", "def mark_commented_lines\n [].tap do |reg|\n in_block_comment = false\n line_no = 0\n start_block = 0\n end_block = 0\n @source.each_line do |line|\n line_no = line_no+1\n\n start_block = line_no if !in_block_comment and line =~ @start_block_comment_regex\n end_block = line_no if start_block < line_no and line =~ @end_block_comment_regex\n end_block = line_no if line =~ @oneline_block_comment_regex\n\n in_block_comment = end_block < start_block\n\n reg << line_no if in_block_comment or end_block == line_no or line =~ @comment_regex\n end\n end\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def comment?\n type == COMMENT_NODE\n end", "def comment?\n @contents[0] == :comment\n end", "def parse_comments(comments); end", "def verify_comment(line) \n end", "def comment_line?(line_source)\n /^\\s*#/.match?(line_source)\n end", "def single_line?; end", "def parse_with_comments(code)\n return RubyLint::Parser.new.parse(code)\nend", "def reduce_multi_line(_production, _range, _tokens, _children)\n Regexp::MULTILINE\n end", "def node_on_single_line?(node)\n return if node.source_range.start_pos.line != 
node.source_range.end_pos.line\n\n # The Sass parser reports an incorrect source range if the trailing curly\n # brace is on the next line, e.g.\n #\n # p {\n # }\n #\n # Since we don't want to count this as a single line node, check if the\n # last character on the first line is an opening curly brace.\n engine.lines[node.line - 1].strip[-1] != '{'\n end", "def commentState\n data = @stream.char()\n if data == \"-\"\n @state = @states[\"commentDash\"]\n elsif data == EOF\n # XXX parse error\n @tokenQueue.append(@currentToken)\n @state = @states[\"data\"]\n else\n @currentToken[\"data\"] += data + @stream.charsUntil(\"-\")\n end\n return true\n end", "def nodoc_comment?(node, require_all: T.unsafe(nil)); end", "def count_multiline_comments(buff, regexp)\n unless regexp.is_a?(Array) then regexp = [regexp] end\n\n regexp.reduce(0) do |acc, regexp|\n acc + buff.reduce(''){|acc,x| acc + x}.scan(regexp).map { |x|\n x.map{|y| y.lines.count}.reduce(0){|acc,y| acc + y}\n }.reduce(0){|acc, x| acc + x}\n end\n end", "def count_multiline_comments(buff, regexp)\n unless regexp.is_a?(Array) then regexp = [regexp] end\n\n regexp.reduce(0) do |acc, regexp|\n acc + buff.reduce(''){|acc,x| acc + x}.scan(regexp).map { |x|\n x.map{|y| y.lines.count}.reduce(0){|acc,y| acc + y}\n }.reduce(0){|acc, x| acc + x}\n end\n end", "def new_comment comment, line_no = nil\n c = RDoc::Comment.new comment, @top_level, :ruby\n c.line = line_no\n c.format = @markup\n c\n end", "def line_terminator\n if line_terminator?(@codes[@pos])\n begin\n @pos += 1\n end until !line_terminator?(@codes[@pos])\n return ECMA262::LineTerminator.get\n else\n nil\n end\n end", "def render_comment(line)\n conditional, line = balance(line, ?[, ?]) if line[0] == ?[\n line.strip!\n conditional << \">\" if conditional\n\n if block_opened? && !line.empty?\n raise SyntaxError.new('Illegal nesting: nesting within a tag that already has content is illegal.', @next_line.index)\n end\n\n open = \"<!--#{conditional}\"\n\n # Render it statically if possible\n unless line.empty?\n return push_text(\"#{open} #{line} #{conditional ? \"<![endif]-->\" : \"-->\"}\")\n end\n\n push_text(open, 1)\n @output_tabs += 1\n push_and_tabulate([:comment, !conditional.nil?])\n unless line.empty?\n push_text(line)\n close\n end\n end", "def parse_comments(result)\n parse_type(result, \"comment\")\n end", "def comment(string)\n case string.strip # strip leading and trailing whitespaces\n when /^body=\"start\"/ # match starting comment\n @interesting = true\n when /^body=\"end\"/\n @interesting = false # match closing comment\n end\n end", "def comment(string)\n case string.strip # strip leading and trailing whitespaces\n when /^body=\"start\"/ # match starting comment\n @interesting = true\n when /^body=\"end\"/\n @interesting = false # match closing comment\n end\n end", "def render_comment(line)\n conditional, content = line.scan(COMMENT_REGEX)[0]\n content.strip!\n\n if @block_opened && !content.empty?\n raise SyntaxError.new('Illegal Nesting: Nesting within a tag that already has content is illegal.')\n end\n\n try_one_line = !content.empty?\n push_silent \"_hamlout.open_comment(#{try_one_line}, #{conditional.inspect}, #{@output_tabs})\"\n @output_tabs += 1\n push_and_tabulate([:comment, !conditional.nil?])\n if try_one_line\n push_text content\n close\n end\n end", "def comment_level\n @comment_level || SEVERITY_LEVELS.first\n end" ]
[ "0.7013821", "0.657153", "0.64667755", "0.6453468", "0.64014614", "0.63901246", "0.63828933", "0.63772035", "0.62741184", "0.62337816", "0.622274", "0.6112313", "0.6060025", "0.59266126", "0.5868521", "0.58633566", "0.58363307", "0.583429", "0.5823855", "0.58218694", "0.58218694", "0.58218694", "0.57964665", "0.5776348", "0.57644904", "0.57424253", "0.57319635", "0.5687281", "0.56636304", "0.56330156", "0.56330156", "0.56330156", "0.5630138", "0.5614473", "0.5605289", "0.55710167", "0.5555441", "0.5555441", "0.5555441", "0.55281144", "0.5518871", "0.55153686", "0.55141765", "0.5488642", "0.5488642", "0.5436357", "0.5436357", "0.54198325", "0.5419269", "0.5413998", "0.54037744", "0.53755176", "0.53518945", "0.53437585", "0.5335773", "0.5316415", "0.5285039", "0.5278423", "0.5274228", "0.52610576", "0.52540946", "0.5230877", "0.5230123", "0.52166194", "0.5196857", "0.51959985", "0.51863027", "0.518164", "0.51794416", "0.51680654", "0.51640743", "0.516123", "0.51611525", "0.5153972", "0.51504743", "0.5130236", "0.5115229", "0.5111646", "0.5082962", "0.50788087", "0.5073088", "0.50615233", "0.50557685", "0.5050701", "0.5039104", "0.5032275", "0.50139594", "0.5012596", "0.49726593", "0.49691877", "0.49677867", "0.49677867", "0.49652317", "0.49640894", "0.4955754", "0.49554077", "0.49525452", "0.49525452", "0.49486014", "0.49464878" ]
0.7715293
0
Tests next literal is SinleLineComment or not. If literal is SingleLineComment return ECMA262::SingleLineComment object and forward lexical parser position. Otherwise return nil and position is not changed.
def single_line_comment # // if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2f @pos += 2 pos0 = @pos while (code = @codes[@pos]) and !line_terminator?(code) @pos += 1 end return ECMA262::SingleLineComment.new(@codes[pos0...@pos].pack("U*")) else nil end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_line_comment\n # /*\n if @codes[@pos] == 0x2f and @codes[@pos + 1] == 0x2a\n @pos += 2\n pos0 = @pos\n # */\n while (code = @codes[@pos] != 0x2a) or @codes[@pos + 1] != 0x2f\n raise ParseError.new(\"no `*/' at end of comment\", self) if code.nil?\n @pos += 1\n end\n @pos +=2\n return ECMA262::MultiLineComment.new(@codes[pos0...(@pos-2)].pack(\"U*\"))\n else\n nil\n end\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"#\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_eol)\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_eol)\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eol)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n break unless _tmp\n _tmp = apply(:_eol)\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def _Comment\n\n _save = self.pos\n while true # sequence\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"//\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_Nl)\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_Nl)\n unless _tmp\n self.pos = _save\n break\n end\n while true\n _tmp = apply(:_EmptyLine)\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_Comment unless _tmp\n return _tmp\n end", "def read_line_comment(token)\n token.kind = :line_comment\n read_next() while (current = peek_next()) && current != ?\\n\n token.value = @source[token.from .. @marker.source_index] if !@skip_comments\n end", "def parse_comment\n return false unless @lexer.get and @lexer.get.type == :comment_start\n @lexer.next!\n\n buf = ''\n while token = @lexer.get\n break if token.type == :comment_end\n buf << token.value\n @lexer.next!\n end\n\n found :comment, buf\n @lexer.next!\n true\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"#\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_end_hyphen_of_hyphen_line)\n _tmp = _tmp ? 
nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = apply(:_utf8)\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def comment?\n return @assigned_paragraph_type == :comment if @assigned_paragraph_type\n return block_type.casecmp(\"COMMENT\") if begin_block? or end_block?\n return @line =~ /^[ \\t]*?#[ \\t]/\n end", "def comment?\n @kind == :line_comment || @kind == :block_comment\n end", "def comment_line?(line_source); end", "def comment_line?(line_source); end", "def is_comment(line)\n result = false\n # If source supports single line comments\n if comment_symbols[:single_line]\n result ||= line =~ /^\\s*#{Regexp.escape(comment_symbols[:single_line])}/\n end\n\n # If source supports multi-line comments\n if comment_symbols[:multiline]\n result ||= line =~ /^\\s*#{Regexp.escape(comment_symbols[:multiline][:begin])}/\n end\n result\n end", "def lex_comment line\n # do nothing\n end", "def comment\n multi_line_comment || single_line_comment\n end", "def consume_comments\n if @s.peek(2) == '/*'\n @s.consume\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n # Parse error.\n text = @s.consume_rest\n end\n\n return create_token(:comment, :value => text)\n end\n\n nil\n end", "def is_comment?(line)\n line =~ /^\\s*#/\n end", "def _comment\n\n _save = self.pos\n while true # sequence\n _tmp = match_string(\"(\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # choice\n _tmp = apply(:_ctext)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_quoted_pair)\n break if _tmp\n self.pos = _save2\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save2\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\")\")\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def process_initial_comment(tk)\n if @statement.empty? && (@comments_last_line || 0) < tk.line_no - 2\n @comments = nil\n end\n\n return unless tk.class == TkCOMMENT\n\n case tk.text\n when Parser::SourceParser::SHEBANG_LINE\n if !@last_ns_tk && !@encoding_line\n @shebang_line = tk.text\n return\n end\n when Parser::SourceParser::ENCODING_LINE\n if (@last_ns_tk.class == TkCOMMENT && @last_ns_tk.text == @shebang_line) ||\n !@last_ns_tk\n @encoding_line = tk.text\n return\n end\n end\n\n return if [email protected]? 
&& @comments\n return if @first_line && tk.line_no > @first_line\n\n if @comments_last_line && @comments_last_line < tk.line_no - 1\n if @comments && @statement.empty?\n @tokens.unshift(tk)\n return @done = true\n end\n @comments = nil\n end\n @comments_line = tk.line_no unless @comments\n\n # Remove the \"#\" and up to 1 space before the text\n # Since, of course, the convention is to have \"# text\"\n # and not \"#text\", which I deem ugly (you heard it here first)\n @comments ||= []\n if tk.text.start_with?('=begin')\n lines = tk.text.count(\"\\n\")\n @comments += tk.text.gsub(/\\A=begin.*\\r?\\n|\\r?\\n=end.*\\r?\\n?\\Z/, '').split(/\\r?\\n/)\n @comments_last_line = tk.line_no + lines\n else\n @comments << tk.text.gsub(/^(#+)\\s{0,1}/, '')\n @comments_hash_flag = $1 == '##' if @comments_hash_flag.nil?\n @comments_last_line = tk.line_no\n end\n @comments.pop if @comments.size == 1 && @comments.first =~ /^\\s*$/\n true\n end", "def collect_first_comment\n skip_tkspace\n comment = ''.dup\n comment = RDoc::Encoding.change_encoding comment, @encoding if @encoding\n first_line = true\n first_comment_tk_kind = nil\n line_no = nil\n\n tk = get_tk\n\n while tk && (:on_comment == tk[:kind] or :on_embdoc == tk[:kind])\n comment_body = retrieve_comment_body(tk)\n if first_line and comment_body =~ /\\A#!/ then\n skip_tkspace\n tk = get_tk\n elsif first_line and comment_body =~ /\\A#\\s*-\\*-/ then\n first_line = false\n skip_tkspace\n tk = get_tk\n else\n break if first_comment_tk_kind and not first_comment_tk_kind === tk[:kind]\n first_comment_tk_kind = tk[:kind]\n\n line_no = tk[:line_no] if first_line\n first_line = false\n comment << comment_body\n tk = get_tk\n\n if :on_nl === tk then\n skip_tkspace_without_nl\n tk = get_tk\n end\n end\n end\n\n unget_tk tk\n\n new_comment comment, line_no\n end", "def is_comment?(line)\n true if line =~ /^\\#.*$/\n end", "def ml_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 40)\n\n type = ML_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 478:4: '/*' ( . )* '*/'\n match(\"/*\")\n # --> action\n if @input.peek(1) == ?* then type = DOC_COMMENT else channel = HIDDEN end \n # <-- action\n # at line 478:88: ( . )*\n loop do #loop 4\n alt_4 = 2\n look_4_0 = @input.peek(1)\n\n if (look_4_0 == ?*) \n look_4_1 = @input.peek(2)\n\n if (look_4_1 == ?/) \n alt_4 = 2\n elsif (look_4_1.between?(0x0000, ?.) 
|| look_4_1.between?(?0, 0xFFFF)) \n alt_4 = 1\n\n end\n elsif (look_4_0.between?(0x0000, ?)) || look_4_0.between?(?+, 0xFFFF)) \n alt_4 = 1\n\n end\n case alt_4\n when 1\n # at line 478:88: .\n match_any\n\n else\n break #loop 4\n end\n end\n match(\"*/\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 40)\n\n end", "def _eof_comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eof)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_eof_comment unless _tmp\n return _tmp\n end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def parse_comment\n s0 = @scanner.pos\n if match_str('*') == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n s2 = parse_nonls\n if parse_nl == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n @reported_pos = s0\n s0 = s2.join\n end\n end\n if s0 == :failed\n s0 = @scanner.pos\n s1 = match_str('&')\n if s1 == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n s2 = parse_nonls\n if parse_nl == :failed\n @scanner.pos = s0\n s0 = :failed\n else\n @reported_pos = s0\n s0 = '&' + s2.join\n end\n end\n end\n s0\n end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def detect_comments\n if @input =~ %r{^\\s*[/]{2}}\n @mode = :comment\n @expression = ''\n end\n end", "def comment_line?(line_source)\n /^\\s*#/.match?(line_source)\n end", "def comment\n comment = buffer.options.comment_line.to_s\n indent = nil\n lines = []\n\n each_line do |line, fc, tc|\n line_fc = \"#{line}.#{fc}\"\n line_tc = \"#{line}.#{tc}\"\n\n next if buffer.at_end == line_tc\n\n lines << line\n\n next if indent == 0 # can't get lower\n\n line = buffer.get(\"#{line}.#{fc}\", \"#{line}.#{tc}\")\n\n next unless start = line =~ /\\S/\n\n indent ||= start\n indent = start if start < indent\n end\n\n indent ||= 0\n\n buffer.undo_record do |record|\n lines.each do |line|\n record.insert(\"#{line}.#{indent}\", comment)\n end\n end\n end", "def multiline_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 50 )\n\n\n\n type = MULTILINE_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 229:21: '/*' ( . )* '*/'\n match( \"/*\" )\n\n # at line 229:26: ( . 
)*\n while true # decision 7\n alt_7 = 2\n look_7_0 = @input.peek( 1 )\n\n if ( look_7_0 == 0x2a )\n look_7_1 = @input.peek( 2 )\n\n if ( look_7_1 == 0x2f )\n alt_7 = 2\n elsif ( look_7_1.between?( 0x0, 0x2e ) || look_7_1.between?( 0x30, 0xffff ) )\n alt_7 = 1\n\n end\n elsif ( look_7_0.between?( 0x0, 0x29 ) || look_7_0.between?( 0x2b, 0xffff ) )\n alt_7 = 1\n\n end\n case alt_7\n when 1\n # at line 229:26: .\n match_any\n\n else\n break # out of loop for decision 7\n end\n end # loop for decision 7\n\n\n match( \"*/\" )\n\n\n # --> action\n channel = HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 50 )\n\n\n end", "def parse_with_comments(source_buffer); end", "def parse_with_comments(source_buffer); end", "def parse_with_comments(source_buffer); end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 15)\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 148:3: ( ( '#' | '//' ) (~ '\\\\n' )* | '/*' ( . )* '*/' )\n alt_10 = 2\n look_10_0 = @input.peek(1)\n\n if (look_10_0 == ?#) \n alt_10 = 1\n elsif (look_10_0 == ?/) \n look_10_2 = @input.peek(2)\n\n if (look_10_2 == ?/) \n alt_10 = 1\n elsif (look_10_2 == ?*) \n alt_10 = 2\n else\n nvae = NoViableAlternative(\"\", 10, 2)\n raise nvae\n end\n else\n nvae = NoViableAlternative(\"\", 10, 0)\n raise nvae\n end\n case alt_10\n when 1\n # at line 148:5: ( '#' | '//' ) (~ '\\\\n' )*\n # at line 148:5: ( '#' | '//' )\n alt_7 = 2\n look_7_0 = @input.peek(1)\n\n if (look_7_0 == ?#) \n alt_7 = 1\n elsif (look_7_0 == ?/) \n alt_7 = 2\n else\n nvae = NoViableAlternative(\"\", 7, 0)\n raise nvae\n end\n case alt_7\n when 1\n # at line 148:7: '#'\n match(?#)\n\n when 2\n # at line 148:13: '//'\n match(\"//\")\n\n end\n # at line 148:20: (~ '\\\\n' )*\n while true # decision 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0.between?(0x0000, ?\\t) || look_8_0.between?(0x000B, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 148:20: ~ '\\\\n'\n if @input.peek(1).between?(0x0000, ?\\t) || @input.peek(1).between?(0x000B, 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 8\n end\n end # loop for decision 8\n\n when 2\n # at line 149:5: '/*' ( . )* '*/'\n match(\"/*\")\n # at line 149:10: ( . )*\n while true # decision 9\n alt_9 = 2\n look_9_0 = @input.peek(1)\n\n if (look_9_0 == ?*) \n look_9_1 = @input.peek(2)\n\n if (look_9_1 == ?/) \n alt_9 = 2\n elsif (look_9_1.between?(0x0000, ?.) 
|| look_9_1.between?(?0, 0xFFFF)) \n alt_9 = 1\n\n end\n elsif (look_9_0.between?(0x0000, ?)) || look_9_0.between?(?+, 0xFFFF)) \n alt_9 = 1\n\n end\n case alt_9\n when 1\n # at line 149:10: .\n match_any\n\n else\n break # out of loop for decision 9\n end\n end # loop for decision 9\n match(\"*/\")\n\n end\n \n @state.type = type\n @state.channel = channel\n # --> action\n skip \n # <-- action\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 15)\n\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 13 )\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 63:5: '#' (~ ( '\\\\r' | '\\\\n' ) )*\n match( 0x23 )\n # at line 63:9: (~ ( '\\\\r' | '\\\\n' ) )*\n while true # decision 22\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x0, 0x9 ) || look_22_0.between?( 0xb, 0xc ) || look_22_0.between?( 0xe, 0xffff ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line 63:11: ~ ( '\\\\r' | '\\\\n' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 22\n end\n end # loop for decision 22\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 13 )\n\n end", "def lex_comment(input)\n case input\n when /\\A\\s*\\/\\// # single line comment //\n if /\\A\\s*(\\/\\/.*?\\n)/.match(input)\n return :comment, $1, $'\n else\n return :open, '//', input\n end\n when /\\A\\s*\\/\\*/m # multi-line comment /* */\n if /\\A\\s*(\\/\\*.*?\\*\\/)/m.match(input)\n return :comment, $1, $'\n else\n return :open, '/*', input\n end\n when /\\A\\s*@\"/ # objective C string\n if :objective_c == @language\n token, value, rest = lex_string($')\n if :open == token\n return :open, '@\"', input\n elsif :error == token\n return :error, nil, input\n else\n return :string, '@\"' + value, rest\n end\n else\n return :error, nil, input\n end\n when /\\A\\s*\"/ # double quoted string \" \"\n token, value, rest = lex_string($')\n if :open == token\n return :open, '\"', input\n elsif :error == token\n return :error, nil, input\n else\n return :string, '\"' + value, rest\n end\n when /\\A\\s*'/ # char literal ' '\n token, value, rest = lex_char($')\n if :open == token\n return :open, \"'\", input\n elsif :error == token\n return :error, nil, input\n else\n return :char, \"'\" + value, rest\n end\n when /\\A\\s*(@#{@regex_identifier})/ # objective c directive\n value, rest = $1, $'\n if @keywords.include?(value)\n return :keyword, value, rest\n else\n return :error, nil, input\n end\n when /\\A\\s*(#{@regex_identifier})/\n value, rest = $1, $'\n if @keywords.include?(value)\n return :keyword, value, rest\n elsif @unique_tokens.has_key?(value)\n return @unique_tokens[value], value, rest\n else\n return :identifier, value, rest\n end\n when /\\A\\s*(#{@regex_float})/\n return :float, $1, $'\n when /\\A\\s*(#{@regex_hex_float})/\n return :float, $1, $'\n when /\\A\\s*(#{@regex_integer})/\n return :integer, $1, $'\n when /\\A\\s*(\\S.*)\\z/m\n val, rest = lex_punctuator($1)\n if val\n return :punctuator, val, rest\n else\n return :error, nil, input\n end\n else\n return :end\n end\n end", "def _HtmlComment\n\n _save = self.pos\n while true # sequence\n 
_tmp = match_string(\"<!--\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = match_string(\"-->\")\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"-->\")\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_HtmlComment unless _tmp\n return _tmp\n end", "def lex\n @index += 1\n while lexer.tokens[@index] === :COMMENT\n @index += 1\n end\n lexer.tokens[@index] or unexpected_error(:EOF)\n end", "def verify_comment(line) \n end", "def _comment\n\n _save = self.pos\n while true # choice\n _tmp = scan(/\\A(?-mix:--.*?$)/)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_multi_comment)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_comment unless _tmp\n return _tmp\n end", "def consume_comment(input)\n while not input.eof? do\n case input.look_ahead\n when \"\\\\\" : \n # In comments, only escaped backslashes and line endings matter\n if [\"\\n\", \"\\\\\"].include? input.look_ahead(1)\n input.consume\n end\n when \"\\n\" : input.consume; break \n end\n input.consume\n end\n end", "def comment?\n @contents[0] == :comment\n end", "def has_comment?(line)\n line =~ /#[^{]/\n end", "def line_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 78 )\n\n\n\n type = LINE_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 608:8: '#' (~ ( '\\\\n' | '\\\\r' ) )* ( '\\\\r' )? '\\\\n'\n match( 0x23 )\n # at line 608:12: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 25\n alt_25 = 2\n look_25_0 = @input.peek( 1 )\n\n if ( look_25_0.between?( 0x0, 0x9 ) || look_25_0.between?( 0xb, 0xc ) || look_25_0.between?( 0xe, 0xffff ) )\n alt_25 = 1\n\n end\n case alt_25\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xffff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 25\n end\n end # loop for decision 25\n\n # at line 608:26: ( '\\\\r' )?\n alt_26 = 2\n look_26_0 = @input.peek( 1 )\n\n if ( look_26_0 == 0xd )\n alt_26 = 1\n end\n case alt_26\n when 1\n # at line 608:26: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n # --> action\n channel=HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 78 )\n\n\n end", "def read_block_comment(token)\n token.kind = :block_comment\n\n read_next()\n while (current = read_next())\n if current == ?* && peek_next() == ?/\n current = read_next()\n break\n end\n end\n\n raise_error(:unterminated_block_comment, \"Unterminated block comment\") if !current\n token.value = @source[token.from .. 
@marker.source_index] if !@skip_comments\n end", "def comment_token\n return unless match = @chunk.match(COMMENT)\n # _, comment = *match\n # token(:COMMENT, comment, 0, comment.size)\n match[0].size\n end", "def comment(string)\n case string.strip # strip leading and trailing whitespaces\n when /^body=\"start\"/ # match starting comment\n @interesting = true\n when /^body=\"end\"/\n @interesting = false # match closing comment\n end\n end", "def comment(string)\n case string.strip # strip leading and trailing whitespaces\n when /^body=\"start\"/ # match starting comment\n @interesting = true\n when /^body=\"end\"/\n @interesting = false # match closing comment\n end\n end", "def preceding_comment?(node1, node2); end", "def wrap_final_comment\n current = wrap_rwhitespace(whitespaces: /\\A[ \\t\\r\\f]+/)\n if @source_buffer.slice(current.end_pos) != '#'\n # No comment, do nothing\n return self\n end\n comment = @source_buffer.slice(current.end_pos..-1)[/\\A[^\\n]+/] || ''\n current.adjust(end_pos: comment.size)\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 49 )\n\n\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 228:11: '//' ( . )* ( '\\\\n' | '\\\\r' )\n match( \"//\" )\n\n # at line 228:16: ( . )*\n while true # decision 6\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0 == 0xa || look_6_0 == 0xd )\n alt_6 = 2\n elsif ( look_6_0.between?( 0x0, 0x9 ) || look_6_0.between?( 0xb, 0xc ) || look_6_0.between?( 0xe, 0xffff ) )\n alt_6 = 1\n\n end\n case alt_6\n when 1\n # at line 228:16: .\n match_any\n\n else\n break # out of loop for decision 6\n end\n end # loop for decision 6\n\n if @input.peek(1) == 0xa || @input.peek(1) == 0xd\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n # --> action\n channel = HIDDEN;\n # <-- action\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 49 )\n\n\n end", "def extract_comment comment\n case comment\n when Array then\n comment.map do |c|\n extract_comment c\n end\n when nil\n RDoc::Comment.new ''\n when RDoc::Comment then\n if comment.text =~ /^#[ \\t]*:section:.*\\n/ then\n start = $`\n rest = $'\n\n comment.text = if start.empty? 
then\n rest\n else\n rest.sub(/#{start.chomp}\\Z/, '')\n end\n end\n\n comment\n when RDoc::Markup::Document then\n comment\n else\n raise TypeError, \"unknown comment #{comment.inspect}\"\n end\n end", "def parse_comments\n @data[4][0]\n end", "def comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 35 )\n\n type = COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 350:9: '#' (~ ( '\\\\n' | '\\\\r' ) )*\n match( 0x23 )\n # at line 350:13: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 13\n alt_13 = 2\n look_13_0 = @input.peek( 1 )\n\n if ( look_13_0.between?( 0x0, 0x9 ) || look_13_0.between?( 0xb, 0xc ) || look_13_0.between?( 0xe, 0xffff ) )\n alt_13 = 1\n\n end\n case alt_13\n when 1\n # at line 350:13: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 13\n end\n end # loop for decision 13\n # --> action\n channel=HIDDEN;\n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 35 )\n\n end", "def parse_with_comments(source); end", "def sl_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 39)\n\n type = SL_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 470:5: '//' ( ' $ANTLR ' SRC | (~ ( '\\\\r' | '\\\\n' ) )* ) ( '\\\\r' )? '\\\\n'\n match(\"//\")\n # at line 471:5: ( ' $ANTLR ' SRC | (~ ( '\\\\r' | '\\\\n' ) )* )\n alt_2 = 2\n alt_2 = @dfa2.predict(@input)\n case alt_2\n when 1\n # at line 471:7: ' $ANTLR ' SRC\n match(\" $ANTLR \")\n src!\n\n when 2\n # at line 472:6: (~ ( '\\\\r' | '\\\\n' ) )*\n # at line 472:6: (~ ( '\\\\r' | '\\\\n' ) )*\n loop do #loop 1\n alt_1 = 2\n look_1_0 = @input.peek(1)\n\n if (look_1_0.between?(0x0000, ?\\t) || look_1_0.between?(0x000B, ?\\f) || look_1_0.between?(0x000E, 0xFFFF)) \n alt_1 = 1\n\n end\n case alt_1\n when 1\n # at line 472:6: ~ ( '\\\\r' | '\\\\n' )\n if @input.peek(1).between?(0x0000, ?\\t) || @input.peek(1).between?(0x000B, ?\\f) || @input.peek(1).between?(0x000E, 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break #loop 1\n end\n end\n\n end\n # at line 474:3: ( '\\\\r' )?\n alt_3 = 2\n look_3_0 = @input.peek(1)\n\n if (look_3_0 == ?\\r) \n alt_3 = 1\n end\n case alt_3\n when 1\n # at line 474:3: '\\\\r'\n match(?\\r)\n\n end\n match(?\\n)\n # --> action\n channel=HIDDEN;\n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 39)\n\n end", "def comment?\n type == COMMENT_NODE\n end", "def comment_token\n if md=HERE_COMMENT.match(@chunk)\n input, comment, body = md.to_a\n token :HereComment, body, :newLine => true\n token :Terminator, \"\\n\"\n @line += count(comment, \"\\n\")\n return comment.length\n elsif md=COMMENT.match(@chunk)\n input, comment, body = md.to_a\n token :Comment, body\n return comment.length\n end\n\n return nil\n end", "def comment\n @comment ||= begin\n space = node.previous_sibling and\n space.to_s.blank? 
&& space.to_s.count(\"\\n\") == 1 and\n comment_node = space.previous_sibling\n\n if comment_node.is_a?(REXML::Comment)\n doc.restore_erb_scriptlets(comment_node.to_s.strip)\n end\n end\n end", "def first_is_comment?(page)\n\t\treturn false unless page || page.list || page.list[0]\n\t\treturn false unless page.list[0].code == 108\n\t\tindex = 1\n\t\tlist = [page.list[0].parameters[0]]\n\t\twhile page.list[index].code == 408\n\t\t\tlist << page.list[index].parameters[0]\n\t\t\tindex += 1\n\t\tend\n\t\treturn list.collect{|line|line+=\" \"}.join\n\tend", "def commentState\n data = @stream.char()\n if data == \"-\"\n @state = @states[\"commentDash\"]\n elsif data == EOF\n # XXX parse error\n @tokenQueue.append(@currentToken)\n @state = @states[\"data\"]\n else\n @currentToken[\"data\"] += data + @stream.charsUntil(\"-\")\n end\n return true\n end", "def comment?\n node_type == COMMENT_NODE\n end", "def count_single_line_comments(buff, comment_regexp)\n a = buff.select { |l|\n not l.match(comment_regexp).nil?\n }.size\n a\n end", "def count_single_line_comments(buff, comment_regexp)\n a = buff.select { |l|\n not l.match(comment_regexp).nil?\n }.size\n a\n end", "def parse_with_comments(source_buffer)\n @lexer.comments = []\n\n [ parse(source_buffer), @lexer.comments ]\n ensure\n @lexer.comments = nil\n end", "def comment(string); end", "def comment(string); end", "def comment(string); end", "def comment(string); end", "def visit_comment(node)\n line = @original_haml_lines[node.line - 1]\n indent = line.index(/\\S/)\n @ruby_chunks << PlaceholderMarkerChunk.new(node, 'comment', indent: indent)\n end", "def spaceFirstComment(theLines)\n\n\ttheLines.each_with_index do |theLine, theIndex|\n\n\t\t# Two blank lines between brace and leading comment\n\t\tif (theLine[:text] == \"{\" && theLine[:comment].empty?)\n\t\t\n\t\t\tnextLine = theLines[theIndex + 1];\n\n\t\t\tif (nextLine[:text] =~ /^\\s*$/ && !nextLine[:comment].empty?)\n\t\t\t\ttheLines.insert(theIndex + 1, EMPTY_LINE);\n\t\t\t\ttheLines.insert(theIndex + 1, EMPTY_LINE);\n\t\t\t\tbreak;\n\t\t\tend\n\t\tend\n\n\tend\n\nend", "def new_comment comment, line_no = nil\n c = RDoc::Comment.new comment, @top_level, :ruby\n c.line = line_no\n c.format = @markup\n c\n end", "def comment_begins?\n\t\t\n\tend", "def line_comments_option; end", "def render_comment(line)\n conditional, content = line.scan(COMMENT_REGEX)[0]\n content.strip!\n\n if @block_opened && !content.empty?\n raise SyntaxError.new('Illegal Nesting: Nesting within a tag that already has content is illegal.')\n end\n\n try_one_line = !content.empty?\n push_silent \"_hamlout.open_comment(#{try_one_line}, #{conditional.inspect}, #{@output_tabs})\"\n @output_tabs += 1\n push_and_tabulate([:comment, !conditional.nil?])\n if try_one_line\n push_text content\n close\n end\n end", "def parse_comment(raw)\n c = nil\n if raw =~ /\\A\\((.+?)\\)(.+)\\z/\n c, raw = [$2, $1]\n end\n if raw =~ /\\A(.+)\\((.+?)\\)\\z/\n raw, c = [$1, $2]\n end\n [raw, c]\n end", "def parse_comment container, tk, comment\n return parse_comment_tomdoc container, tk, comment if @markup == 'tomdoc'\n column = tk[:char_no]\n line_no = comment.line.nil? ? 
tk[:line_no] : comment.line\n\n comment.text = comment.text.sub(/(^# +:?)(singleton-)(method:)/, '\\1\\3')\n singleton = !!$~\n\n co =\n if (comment.text = comment.text.sub(/^# +:?method: *(\\S*).*?\\n/i, '')) && !!$~ then\n line_no += $`.count(\"\\n\")\n parse_comment_ghost container, comment.text, $1, column, line_no, comment\n elsif (comment.text = comment.text.sub(/# +:?(attr(_reader|_writer|_accessor)?): *(\\S*).*?\\n/i, '')) && !!$~ then\n parse_comment_attr container, $1, $3, comment\n end\n\n if co then\n co.singleton = singleton\n co.line = line_no\n end\n\n true\n end", "def extract_comment(line)\n if is_comment? line\n clean_line = line.strip\n start_offset = comment_start.length\n end_offset = clean_line.length - comment_end.length - 1\n clean_line[start_offset..end_offset].strip\n end\n end", "def inline_comment(line)\n # For each single quote, if there is an odd number of double quote before\n # we are in a string, but if there is an even number of double quote before\n # we are out of a string so this is an inline comment and we can remove all\n # that comes after.\n double_quote = 0\n i = 0\n line.each_char do |c|\n double_quote += 1 if c == '\"'\n if c == \"'\" && double_quote.even?\n line = line[...i]\n break\n end\n i += 1\n end\n return line\n end", "def visit_haml_comment(node)\n # We want to preserve leading whitespace if it exists, but add a leading\n # whitespace if it doesn't exist so that RuboCop's LeadingCommentSpace\n # doesn't complain\n line_index = node.line - 1\n lines = @original_haml_lines[line_index..(line_index + node.text.count(\"\\n\"))].dup\n indent = lines.first.index(/\\S/)\n # Remove only the -, the # will align with regular code\n # -# comment\n # - foo()\n # becomes\n # # comment\n # foo()\n lines[0] = lines[0].sub('-', '')\n\n # Adding a space before the comment if its missing\n # We can't fix those, so make sure not to generate warnings for them.\n lines[0] = lines[0].sub(/\\A(\\s*)#(\\S)/, '\\\\1# \\\\2')\n\n HamlLint::Utils.map_after_first!(lines) do |line|\n # Since the indent/spaces of the extra line comments isn't exactly in the haml,\n # it's not RuboCop's job to fix indentation, so just make a reasonable indentation\n # to avoid offenses.\n ' ' * indent + line.sub(/^\\s*/, '# ').rstrip\n end\n\n # Using Placeholder instead of script because we can't revert back to the\n # exact original comment since multiple syntax lead to the exact same comment.\n @ruby_chunks << HamlCommentChunk.new(node, lines, end_marker_indent: indent)\n end", "def comment(_lexeme, character)\n if character =~ /./\n :comment\n else\n :default\n end\n end", "def f_slash_comment\n emit_comment(parse(\"\\n\"))\nend", "def existing_comment_option\n (options[:new_comments_only] && prev_line_comment) || (options[:inline] && original_line_has_comment?)\n end", "def nodoc_comment?(node, require_all: T.unsafe(nil)); end", "def consume_comments; end", "def process_block_comment!(index, tokens, ranges)\n start_index = index\n line_num = line_for_offset(tokens[index][1])\n while index < tokens.length - 2\n break unless tokens[index + 1][0] == :TOKEN_COMMENT\n next_line = line_for_offset(tokens[index + 1][1])\n # Tokens must be on contiguous lines\n break unless next_line == line_num + 1\n # Must not be a region comment\n comment = extract_text(tokens[index + 1][1])\n break if start_region?(comment) || end_region?(comment)\n # It's a block comment\n line_num = next_line\n index += 1\n end\n\n return index if start_index == index\n\n 
add_range!(create_range_span_tokens(tokens[start_index][1], tokens[index][1], REGION_COMMENT), ranges)\n index\n end", "def render_comment(line)\n conditional, line = balance(line, ?[, ?]) if line[0] == ?[\n line.strip!\n conditional << \">\" if conditional\n\n if block_opened? && !line.empty?\n raise SyntaxError.new('Illegal nesting: nesting within a tag that already has content is illegal.', @next_line.index)\n end\n\n open = \"<!--#{conditional}\"\n\n # Render it statically if possible\n unless line.empty?\n return push_text(\"#{open} #{line} #{conditional ? \"<![endif]-->\" : \"-->\"}\")\n end\n\n push_text(open, 1)\n @output_tabs += 1\n push_and_tabulate([:comment, !conditional.nil?])\n unless line.empty?\n push_text(line)\n close\n end\n end", "def last_magic_comment(source); end", "def mark_commented_lines\n [].tap do |reg|\n in_block_comment = false\n line_no = 0\n start_block = 0\n end_block = 0\n @source.each_line do |line|\n line_no = line_no+1\n\n start_block = line_no if !in_block_comment and line =~ @start_block_comment_regex\n end_block = line_no if start_block < line_no and line =~ @end_block_comment_regex\n end_block = line_no if line =~ @oneline_block_comment_regex\n\n in_block_comment = end_block < start_block\n\n reg << line_no if in_block_comment or end_block == line_no or line =~ @comment_regex\n end\n end\n end", "def commented?(entity)\n line(number: entity.last_line).end_with?(entity.ending)\n end", "def bogusCommentState\n @tokenQueue << {\"type\"=> \"Comment\", \"data\"=> @stream.charsUntil(\">\")}\n @stream.char()\n @state = @states[\"data\"]\n return true\n end", "def comment?; end", "def comment?; end", "def is_comment?\n self.object_type.downcase.to_s == \"comment\" || self.object_type.blank?\n end", "def parse_with_comments(code)\n return RubyLint::Parser.new.parse(code)\nend", "def parse_comments(comments); end", "def node_on_single_line?(node)\n return if node.source_range.start_pos.line != node.source_range.end_pos.line\n\n # The Sass parser reports an incorrect source range if the trailing curly\n # brace is on the next line, e.g.\n #\n # p {\n # }\n #\n # Since we don't want to count this as a single line node, check if the\n # last character on the first line is an opening curly brace.\n engine.lines[node.line - 1].strip[-1] != '{'\n end" ]
[ "0.74113905", "0.7284103", "0.71344495", "0.7119543", "0.70029366", "0.68539715", "0.68034357", "0.67353487", "0.66335565", "0.66077757", "0.66077757", "0.65505517", "0.6515373", "0.65053785", "0.6493863", "0.6464445", "0.64538324", "0.64524835", "0.6403432", "0.63938177", "0.6379648", "0.6348032", "0.6337746", "0.6337746", "0.6337746", "0.6327777", "0.632669", "0.632669", "0.632669", "0.63215065", "0.63117826", "0.6248642", "0.62212485", "0.6170875", "0.6170875", "0.6170875", "0.61215", "0.6114609", "0.6107794", "0.60988665", "0.6098727", "0.6097778", "0.6097377", "0.6089364", "0.6053622", "0.60503435", "0.60057974", "0.5994567", "0.5987252", "0.5985004", "0.5985004", "0.59630525", "0.5953245", "0.59511006", "0.59504", "0.5924125", "0.5913679", "0.59033847", "0.5903088", "0.58947074", "0.58712256", "0.5867414", "0.5846258", "0.5830381", "0.58043", "0.5795386", "0.5795386", "0.5738192", "0.5720635", "0.5720635", "0.5720635", "0.5720635", "0.57146084", "0.5677058", "0.5661749", "0.56460446", "0.5639657", "0.56339705", "0.55966204", "0.557255", "0.55493474", "0.55299175", "0.5523968", "0.55235845", "0.54922104", "0.547315", "0.5471006", "0.54478693", "0.5441729", "0.54300106", "0.54290277", "0.5420263", "0.5419531", "0.5407188", "0.5402178", "0.5402178", "0.53994465", "0.53970325", "0.53908396", "0.53884196" ]
0.79788345
0
Tests next literal is Token or not If literal is Token return ECMA262::Base object and forward lexical parser position. Otherwise return nil and position is not changed.
def token identifier_name || numeric_literal || punctuator || string_literal end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def get_token\n\t\tt = Token.new\n\t\tcase @src[@lineno][@linepos]\n\t\t\twhen ' ' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\f' then #less likely to see this\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\t' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\v' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '0'..'9' then\n\t\t\t\tt = parse_number\n\t\t\twhen 'A-Z' then\n\t\t\t\tt = parse_name\n\t\t\twhen 'a-z' then\n\t\t\t\tparse_name\n\t\t\twhen '_' then\n\t\t\t\tt = parse_name\n\t\t\twhen /[~!$%\\^&*()-+=|{}\\[\\]\\:;\\/?<>,.]/ then #very much check\n\t\t\t\tt = parse_operator\n\t\t\twhen '\"' then\n\t\t\t\tt = parse_string\n\t\tend\n\tend", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def token\n ready_token\n\n i = @buffer.index(/[\\[\\]()<>{}\\s\\/]/) || @buffer.size\n\n token_chars =\n if i == 0 and @buffer[i,2] == \"<<\" then 2\n elsif i == 0 and @buffer[i,2] == \">>\" then 2\n elsif i == 0 then 1\n else i\n end\n\n strip_space = !(i == 0 and @buffer[0,1] == '(')\n tok = head(token_chars, strip_space)\n\n if tok == \"\"\n nil\n elsif tok[0,1] == \"%\"\n @buffer = \"\"\n token\n else\n tok\n end\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def parse_single_token cur_tok\n\n\t\tcase cur_tok.type\n\t\twhen \"Digit\"\n\t\t\tif cur_tok.value.include? 
\".\"\n\t\t\t\treturn Term.new(cur_tok.line, cur_tok.col, magnitude: cur_tok.value.to_f)\n\t\t\telse\n\t\t\t\treturn Term.new(cur_tok.line, cur_tok.col, magnitude: cur_tok.value.to_i)\n\t\t\tend\n\t\twhen \"Identifier\"\n\t\t\treturn Reference.new(cur_tok.line, cur_tok.col, cur_tok.value)\n\t\twhen \"Keyword\"\n\t\t\tif cur_tok.value == \"true\" or cur_tok.value == \"false\"\n\t\t\t\treturn Boolean.new(cur_tok.line, cur_tok.col, cur_tok.value == \"true\")\n\t\t\telse\n\t\t\t\tthrow_error(\"Misplaced keyword.\", cur_tok)\n\t\t\tend\n\t\twhen \"Operator\"\n\t\t\treturn Operator.new(cur_tok.line, cur_tok.col, cur_tok.value)\n\t\twhen \"Punctuation\"\n\t\t\tthrow_error(\"Misplaced Punctuation.\", cur_tok)\n\t\twhen \"String\"\n\t\t\tthrow_error(\"Strings are not implemented in this parser.\", cur_tok)\n\t\twhen \"EOF\"\n\t\t\tthrow_error(\"EOF occured when parsing single token (the lexer & parser aren't talking to each other properly).\", cur_tok)\n\t\telse\n\t\t\tthrow_error(\"parse_single_token failed to identify the type of the token (the lexer & parser aren't talking to each other properly).\", cur_tok)\n\t\tend\n\tend", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def peek\n @tok ||= read_token\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? 
statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, 
tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = @adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return 
Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def peek\n @tokens[@pos]\n end", "def peek\n @tokens[@position]\n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def test_token(token_type, offset = 0)\n debug \"Testing for #{token_type} with offset #{offset}\", :verbose\n\n peeked = peek_token(offset)\n !peeked.nil? && peeked.type == token_type\n end", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def peek # :nodoc:\n @tokens.peek\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def peek\n @tokens.at(@current)\n end", "def peek(token_class, token_list = nil)\n token = read_next_token(token_class)\n return token.class == token_class\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.check(/\\n/) then\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil, :option, :inner, :start, :macro, :rule, :group then\n case\n when ss.skip(/options?.*/) then\n [:state, :option]\n when ss.skip(/inner.*/) then\n [:state, :inner]\n when ss.skip(/macros?.*/) then\n [:state, :macro]\n when ss.skip(/rules?.*/) then\n [:state, :rule]\n when ss.skip(/start.*/) then\n [:state, :start]\n when ss.skip(/end/) then\n [:state, :END]\n when ss.skip(/\\A((?:.|\\n)*)class ([\\w:]+.*)/) then\n action { [:class, *matches] }\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/\\s*(\\#.*)/) then\n action { [:comment, text] }\n when (state == :option) && (ss.skip(/\\s+/)) then\n # do nothing\n when (state == :option) && (text = ss.scan(/stub/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/debug/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/do_parse/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/lineno/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/column/i)) then\n action { [:option, text] }\n when (state == :inner) && (text = ss.scan(/.*/)) then\n action { [:inner, text] }\n when (state == :start) && (text = ss.scan(/.*/)) then\n action { [:start, text] }\n when (state == :macro) && (ss.skip(/\\s+(\\w+)\\s+#{RE}/o)) then\n action { [:macro, *matches] }\n when (state == :rule) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:rule, *matches] }\n when (state == :rule) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*\\|\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:group, *matches] }\n when (state == :group) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:groupend, *matches] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :END then\n case\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/.*/) then\n action { [:end, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def next_token; end", "def next_token\n return process_string if lex_strterm\n self.cmd_state = self.command_start\n self.command_start = false\n self.space_seen = false # TODO: rename token_seen?\n self.last_state = lex_state\n\n token = nil\n\n until ss.eos? 
or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/[\\ \\t\\r\\f\\v]/) then\n action { self.space_seen = true; next }\n when text = ss.scan(/\\n|\\#/) then\n process_newline_or_comment text\n when text = ss.scan(/[\\]\\)\\}]/) then\n process_brace_close text\n when ss.match?(/\\!/) then\n case\n when is_after_operator? && (ss.skip(/\\!\\@/)) then\n action { result EXPR_ARG, :tUBANG, \"!@\" }\n when text = ss.scan(/\\![=~]?/) then\n action { result :arg_state, TOKENS[text], text }\n end # group /\\!/\n when ss.match?(/\\./) then\n case\n when text = ss.scan(/\\.\\.\\.?/) then\n action { result EXPR_BEG, TOKENS[text], text }\n when ss.skip(/\\.\\d/) then\n action { rb_compile_error \"no .<digit> floating literal anymore put 0 before dot\" }\n when ss.skip(/\\./) then\n action { self.lex_state = EXPR_BEG; result EXPR_DOT, :tDOT, \".\" }\n end # group /\\./\n when text = ss.scan(/\\(/) then\n process_paren text\n when text = ss.scan(/\\,/) then\n action { result EXPR_PAR, TOKENS[text], text }\n when ss.match?(/=/) then\n case\n when text = ss.scan(/\\=\\=\\=|\\=\\=|\\=~|\\=>|\\=(?!begin\\b)/) then\n action { result arg_state, TOKENS[text], text }\n when bol? && (text = ss.scan(/\\=begin(?=\\s)/)) then\n process_begin text\n when text = ss.scan(/\\=(?=begin\\b)/) then\n action { result arg_state, TOKENS[text], text }\n end # group /=/\n when ruby22_label? && (text = ss.scan(/\\\"#{SIMPLE_STRING}\\\":/o)) then\n process_label text\n when text = ss.scan(/\\\"(#{SIMPLE_STRING})\\\"/o) then\n action { result EXPR_END, :tSTRING, text[1..-2].gsub(ESC) { unescape $1 } }\n when text = ss.scan(/\\\"/) then\n action { string STR_DQUOTE; result nil, :tSTRING_BEG, text }\n when text = ss.scan(/\\@\\@?\\d/) then\n action { rb_compile_error \"`#{text}` is not allowed as a variable name\" }\n when text = ss.scan(/\\@\\@?#{IDENT_CHAR}+/o) then\n process_ivar text\n when ss.match?(/:/) then\n case\n when not_end? && (text = ss.scan(/:([a-zA-Z_]#{IDENT_CHAR}*(?:[?]|[!](?!=)|=(?==>)|=(?![=>]))?)/o)) then\n process_symbol text\n when not_end? && (text = ss.scan(/\\:\\\"(#{SIMPLE_STRING})\\\"/o)) then\n process_symbol text\n when not_end? && (text = ss.scan(/\\:\\'(#{SSTRING})\\'/o)) then\n process_symbol text\n when text = ss.scan(/\\:\\:/) then\n process_colon2 text\n when text = ss.scan(/\\:/) then\n process_colon1 text\n end # group /:/\n when ss.skip(/->/) then\n action { result EXPR_ENDFN, :tLAMBDA, nil }\n when text = ss.scan(/[+-]/) then\n process_plus_minus text\n when ss.match?(/[+\\d]/) then\n case\n when ss.skip(/#{NUM_BAD}/o) then\n action { rb_compile_error \"Invalid numeric format\" }\n when ss.skip(/#{INT_DEC}/o) then\n action { int_with_base 10 }\n when ss.skip(/#{INT_HEX}/o) then\n action { int_with_base 16 }\n when ss.skip(/#{INT_BIN}/o) then\n action { int_with_base 2 }\n when ss.skip(/#{INT_OCT_BAD}/o) then\n action { rb_compile_error \"Illegal octal digit.\" }\n when ss.skip(/#{INT_OCT}/o) then\n action { int_with_base 8 }\n when ss.skip(/#{FLOAT_BAD}/o) then\n action { rb_compile_error \"Trailing '_' in number.\" }\n when text = ss.scan(/#{FLOAT}/o) then\n process_float text\n when ss.skip(/#{INT_DEC2}/o) then\n action { int_with_base 10 }\n when ss.skip(/[0-9]/) then\n action { rb_compile_error \"Bad number format\" }\n end # group /[+\\d]/\n when text = ss.scan(/\\[/) then\n process_square_bracket text\n when was_label? 
&& (text = ss.scan(/\\'#{SSTRING}\\':?/o)) then\n process_label_or_string text\n when ss.match?(/\\|/) then\n case\n when ss.skip(/\\|\\|\\=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"||\" }\n when ss.skip(/\\|\\|/) then\n action { result EXPR_BEG, :tOROP, \"||\" }\n when ss.skip(/\\|\\=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"|\" }\n when ss.skip(/\\|/) then\n action { state = is_after_operator? ? EXPR_ARG : EXPR_PAR; result state, :tPIPE, \"|\" }\n end # group /\\|/\n when text = ss.scan(/\\{/) then\n process_brace_open text\n when ss.match?(/\\*/) then\n case\n when ss.skip(/\\*\\*=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"**\" }\n when ss.skip(/\\*\\*/) then\n action { result(:arg_state, space_vs_beginning(:tDSTAR, :tDSTAR, :tPOW), \"**\") }\n when ss.skip(/\\*\\=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"*\") }\n when ss.skip(/\\*/) then\n action { result(:arg_state, space_vs_beginning(:tSTAR, :tSTAR, :tSTAR2), \"*\") }\n end # group /\\*/\n when ss.match?(/</) then\n case\n when ss.skip(/\\<\\=\\>/) then\n action { result :arg_state, :tCMP, \"<=>\" }\n when ss.skip(/\\<\\=/) then\n action { result :arg_state, :tLEQ, \"<=\" }\n when ss.skip(/\\<\\<\\=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"<<\" }\n when text = ss.scan(/\\<\\</) then\n process_lchevron text\n when ss.skip(/\\</) then\n action { result :arg_state, :tLT, \"<\" }\n end # group /</\n when ss.match?(/>/) then\n case\n when ss.skip(/\\>\\=/) then\n action { result :arg_state, :tGEQ, \">=\" }\n when ss.skip(/\\>\\>=/) then\n action { result EXPR_BEG, :tOP_ASGN, \">>\" }\n when ss.skip(/\\>\\>/) then\n action { result :arg_state, :tRSHFT, \">>\" }\n when ss.skip(/\\>/) then\n action { result :arg_state, :tGT, \">\" }\n end # group />/\n when ss.match?(/\\`/) then\n case\n when expr_fname? && (ss.skip(/\\`/)) then\n action { result(EXPR_END, :tBACK_REF2, \"`\") }\n when expr_dot? && (ss.skip(/\\`/)) then\n action { result((cmd_state ? EXPR_CMDARG : EXPR_ARG), :tBACK_REF2, \"`\") }\n when ss.skip(/\\`/) then\n action { string STR_XQUOTE, '`'; result(nil, :tXSTRING_BEG, \"`\") }\n end # group /\\`/\n when text = ss.scan(/\\?/) then\n process_questionmark text\n when ss.match?(/&/) then\n case\n when ss.skip(/\\&\\&\\=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"&&\") }\n when ss.skip(/\\&\\&/) then\n action { result(EXPR_BEG, :tANDOP, \"&&\") }\n when ss.skip(/\\&\\=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"&\" ) }\n when ss.skip(/\\&\\./) then\n action { result(EXPR_DOT, :tLONELY, \"&.\") }\n when text = ss.scan(/\\&/) then\n process_amper text\n end # group /&/\n when text = ss.scan(/\\//) then\n process_slash text\n when ss.match?(/\\^/) then\n case\n when ss.skip(/\\^=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"^\") }\n when ss.skip(/\\^/) then\n action { result(:arg_state, :tCARET, \"^\") }\n end # group /\\^/\n when ss.skip(/\\;/) then\n action { self.command_start = true; result(EXPR_BEG, :tSEMI, \";\") }\n when ss.match?(/~/) then\n case\n when is_after_operator? 
&& (ss.skip(/\\~@/)) then\n action { result(:arg_state, :tTILDE, \"~\") }\n when ss.skip(/\\~/) then\n action { result(:arg_state, :tTILDE, \"~\") }\n end # group /~/\n when ss.match?(/\\\\/) then\n case\n when ss.skip(/\\\\\\r?\\n/) then\n action { self.lineno += 1; self.space_seen = true; next }\n when ss.skip(/\\\\/) then\n action { rb_compile_error \"bare backslash only allowed before newline\" }\n end # group /\\\\/\n when text = ss.scan(/\\%/) then\n process_percent text\n when ss.match?(/\\$/) then\n case\n when text = ss.scan(/\\$_\\w+/) then\n process_gvar text\n when text = ss.scan(/\\$_/) then\n process_gvar text\n when text = ss.scan(/\\$[~*$?!@\\/\\\\;,.=:<>\\\"]|\\$-\\w?/) then\n process_gvar text\n when in_fname? && (text = ss.scan(/\\$([\\&\\`\\'\\+])/)) then\n process_gvar text\n when text = ss.scan(/\\$([\\&\\`\\'\\+])/) then\n process_backref text\n when in_fname? && (text = ss.scan(/\\$([1-9]\\d*)/)) then\n process_gvar text\n when text = ss.scan(/\\$([1-9]\\d*)/) then\n process_nthref text\n when text = ss.scan(/\\$0/) then\n process_gvar text\n when text = ss.scan(/\\$[^[:ascii:]]+/) then\n process_gvar text\n when text = ss.scan(/\\$\\W|\\$\\z/) then\n process_gvar_oddity text\n when text = ss.scan(/\\$\\w+/) then\n process_gvar text\n end # group /\\$/\n when text = ss.scan(/\\_/) then\n process_underscore text\n when text = ss.scan(/#{IDENT}/o) then\n process_token text\n when ss.skip(/\\004|\\032|\\000|\\Z/) then\n action { [RubyLexer::EOF, RubyLexer::EOF] }\n when text = ss.scan(/./) then\n action { rb_compile_error \"Invalid char #{text.inspect} in expression\" }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def char_tokenize(p_token, lineno, pos)\n\n\t# could be a KEYWORD, TYPE, or ID here\n\tcase p_token\n\twhen /\\b(while)\\b/\n\t\treturn Token.new(\"T_WHILE\", p_token, lineno, pos)\n\twhen /\\b(if)\\b/\n\t\treturn Token.new(\"T_IF\", p_token, lineno, pos)\n\twhen /\\b(false)\\b/\n\t\treturn Token.new(\"T_BOOLEAN\", p_token, lineno, pos)\n\twhen /\\b(true)\\b/\n\t\treturn Token.new(\"T_BOOLEAN\", p_token, lineno, pos)\n\twhen /\\b(print)\\b/\n\t\treturn Token.new(\"T_PRINT\", p_token, lineno, pos)\n\twhen /\\b(int)\\b/\n\t\treturn Token.new(\"T_TYPE\", p_token, lineno, pos)\n\twhen /\\b(string)\\b/\n\t\treturn Token.new(\"T_TYPE\", p_token, lineno, pos)\n\twhen /\\b(boolean)\\b/\n\t\treturn Token.new(\"T_TYPE\", p_token, lineno, pos)\n\twhen /\\b[a-z]\\b/\n\t\treturn Token.new(\"T_ID\", p_token, lineno, pos)\n\telse\n\t\traise UnknownSymbolError.new(p_token, lineno, pos)\n\tend\n\t\nend", "def next_token\n return if @scanner.eos?\n\n if @scanner.scan(SKIP_PATTERN)\n @column += @scanner[:before].length\n\n new_lines = @scanner[:new_line].delete(\"\\r\")\n unless new_lines.empty?\n @lineno += new_lines.length\n @column = 0\n end\n\n @column += @scanner[:after].length\n end\n\n token =\n case\n when try_match(REFERENCE_PATTERN)\n Token.new :REFERENCE, @scanner[:identifier], @lineno, @column\n when try_match(PATH_PATTERN)\n Token.new :PATH, @scanner[:identifier], @lineno, @column\n when try_match(FILTER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :FILTER, \"?\", @lineno, @column\n when try_match(OPEN_BRACKET_PATTERN)\n @state_stack.push Token.new :OPEN_BRACKET, \"[\", @lineno, @column\n @state_stack.last\n when try_match(OPEN_PAREN_PATTERN)\n @state_stack.push Token.new :OPEN_PAREN, \"(\", @lineno, @column\n @state_stack.last\n when try_match(CLOSE_BRACKET_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\"]\", @lineno, @column)\n end\n unless last.type == :OPEN_BRACKET\n raise TokenizeError.unbalanced(\"[\", last.lineno, last.column)\n end\n Token.new :CLOSE_BRACKET, \"]\", @lineno, @column\n when try_match(CLOSE_PAREN_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\")\", @lineno, @column)\n end\n unless last.type == :OPEN_PAREN\n raise TokenizeError.unbalanced(\"(\", last.lineno, last.column)\n end\n Token.new :CLOSE_PAREN, \")\", @lineno, @column\n when try_match(SELF_PATTERN)\n Token.new :SELF, \"@\", @lineno, @column\n when try_match(NUMBER_PATTERN)\n Token.new :NUMBER, BigDecimal.new(@last_captured), @lineno, @column\n when try_match(STRING_PATTERN)\n Token.new :STRING, @scanner[:str], @lineno, @column\n when try_match(TRUE_PATTERN)\n Token.new :BOOLEAN, true, @lineno, @column\n when try_match(FALSE_PATTERN)\n Token.new :BOOLEAN, false, @lineno, @column\n when try_match(COLON_PATTERN)\n Token.new :COLON, \":\", @lineno, @column\n when try_match(COMMA_PATTERN)\n Token.new :COMMA, \",\", @lineno, @column\n when try_match(ADD_PATTERN)\n Token.new :ADD, \"+\", @lineno, @column\n when try_match(SUBTRACT_PATTERN)\n case @tokens.last&.type\n when nil, :OPEN_PAREN, :OPEN_BRACKET, :COMMA, :COLON, :POW, :MOD, :ADD, :SUBTRACT, :MULTIPLY, :DIVIDE\n if @scanner.check(NUMBER_PATTERN) ||\n @scanner.check(REFERENCE_PATTERN) ||\n @scanner.check(SUBTRACT_PATTERN) ||\n @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :UMINUS, \"-\", @lineno, @column\n else\n raise 
TokenizeError.unexpected(\"-\", @lineno, @column)\n end\n else\n Token.new :SUBTRACT, \"-\", @lineno, @column\n end\n when try_match(MULTIPLY_PATTERN)\n Token.new :MULTIPLY, \"*\", @lineno, @column\n when try_match(DIVIDE_PATTERN)\n Token.new :DIVIDE, \"/\", @lineno, @column\n when try_match(POW_PATTERN)\n Token.new :POW, \"^\", @lineno, @column\n when try_match(MOD_PATTERN)\n Token.new :MOD, \"%\", @lineno, @column\n when try_match(EQUAL_TO_PATTERN)\n Token.new :EQUAL_TO, \"==\", @lineno, @column\n when try_match(NOT_EQUAL_TO_PATTERN)\n Token.new :NOT_EQUAL_TO, \"!=\", @lineno, @column\n when try_match(GREATER_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :GREATER_THAN_OR_EQUAL_TO, \">=\", @lineno, @column\n when try_match(GREATER_THAN_PATTERN)\n Token.new :GREATER_THAN, \">\", @lineno, @column\n when try_match(LESS_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :LESS_THAN_OR_EQUAL_TO, \"<=\", @lineno, @column\n when try_match(LESS_THAN_PATTERN)\n Token.new :LESS_THAN, \"<\", @lineno, @column\n when try_match(AND_PATTERN)\n Token.new :AND, \"&&\", @lineno, @column\n when try_match(OR_PATTERN)\n Token.new :OR, \"||\", @lineno, @column\n when try_match(NOT_PATTERN)\n Token.new :NOT, \"!\", @lineno, @column\n when try_match(INTERSECT_PATTERN)\n Token.new :INTERSECT, \"&\", @lineno, @column\n when try_match(UNION_PATTERN)\n Token.new :UNION, \"|\", @lineno, @column\n when try_match(IDENTIFIER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n unless @scanner.check(OPEN_PAREN_PATTERN)\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n Token.new :FUNCTION, @last_captured, @lineno, @column\n else\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n\n @column += @last_captured.length\n @tokens << token\n\n token\n end", "def next_token\n\t\tif (token = @tokens.shift) != nil\n\t\t\t@copy << token\n\t\t\treturn token.get_token\n\t\telse\n\t\t\treturn nil\n\t\tend\n\tend", "def next_token\n return [false, false] if @src.eos?\n# p @src.rest if @yydebug\n if ret = @src.scan(EM_OPEN_RE)\n @pre << ret\n [:EM_OPEN, ret]\n elsif ret = @src.scan(EM_CLOSE_RE)\n @pre << ret\n [:EM_CLOSE, ret]\n elsif ret = @src.scan(CODE_OPEN_RE)\n @pre << ret\n [:CODE_OPEN, ret]\n elsif ret = @src.scan(CODE_CLOSE_RE)\n @pre << ret\n [:CODE_CLOSE, ret]\n elsif ret = @src.scan(VAR_OPEN_RE)\n @pre << ret\n [:VAR_OPEN, ret]\n elsif ret = @src.scan(VAR_CLOSE_RE)\n @pre << ret\n [:VAR_CLOSE, ret]\n elsif ret = @src.scan(KBD_OPEN_RE)\n @pre << ret\n [:KBD_OPEN, ret]\n elsif ret = @src.scan(KBD_CLOSE_RE)\n @pre << ret\n [:KBD_CLOSE, ret]\n elsif ret = @src.scan(INDEX_OPEN_RE)\n @pre << ret\n [:INDEX_OPEN, ret]\n elsif ret = @src.scan(INDEX_CLOSE_RE)\n @pre << ret\n [:INDEX_CLOSE, ret]\n elsif ret = @src.scan(REF_OPEN_RE)\n @pre << ret\n [:REF_OPEN, ret]\n elsif ret = @src.scan(REF_CLOSE_RE)\n @pre << ret\n [:REF_CLOSE, ret]\n elsif ret = @src.scan(FOOTNOTE_OPEN_RE)\n @pre << ret\n [:FOOTNOTE_OPEN, ret]\n elsif ret = @src.scan(FOOTNOTE_CLOSE_RE)\n @pre << ret\n [:FOOTNOTE_CLOSE, ret]\n elsif ret = @src.scan(VERB_OPEN_RE)\n @pre << ret\n [:VERB_OPEN, ret]\n elsif ret = @src.scan(VERB_CLOSE_RE)\n @pre << ret\n [:VERB_CLOSE, ret]\n elsif ret = @src.scan(BAR_RE)\n @pre << ret\n [:BAR, ret]\n elsif ret = @src.scan(QUOTE_RE)\n @pre << ret\n [:QUOTE, ret]\n elsif ret = @src.scan(SLASH_RE)\n @pre << ret\n [:SLASH, ret]\n elsif ret = @src.scan(BACK_SLASH_RE)\n @pre << ret\n [:BACK_SLASH, ret]\n elsif ret = @src.scan(URL_RE)\n @pre << ret\n [:URL, ret]\n elsif ret = @src.scan(OTHER_RE)\n @pre << ret\n [:OTHER, 
ret]\n else\n ret = @src.rest\n @pre << ret\n @src.terminate\n [:OTHER, ret]\n end\nend", "def current_type\n @token_type[@current_tok]\n end", "def consume\n return nil if @s.eos?\n\n @s.mark\n\n # Consume comments.\n if comment_token = consume_comments\n if @options[:preserve_comments]\n return comment_token\n else\n return consume\n end\n end\n\n # Consume whitespace.\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?(@s.peek(2))\n create_token(:hash,\n :type => start_identifier?(@s.peek(3)) ? :id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n # Non-standard: Preserve the IE * hack.\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n nextTwoChars = @s.peek(2)\n nextThreeChars = char + nextTwoChars\n\n if start_number?(nextThreeChars)\n @s.reconsume\n consume_numeric\n elsif nextTwoChars == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n elsif start_identifier?(nextThreeChars)\n @s.reconsume\n consume_ident\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?(@s.peek(3))\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?\n @s.reconsume\n consume_ident\n else\n # Parse error.\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def token=(_arg0); end", "def token=(_arg0); end", "def token=(_arg0); end", "def token!\r\n # at line 1:8: ( T__6 | NUMBER | SPACE )\r\n alt_3 = 3\r\n case look_3 = @input.peek( 1 )\r\n when 0x2b then alt_3 = 1\r\n when 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 then alt_3 = 2\r\n when 0x20 then alt_3 = 3\r\n else\r\n raise NoViableAlternative( \"\", 3, 0 )\r\n end\r\n case alt_3\r\n when 1\r\n # at line 1:10: T__6\r\n t__6!\r\n\r\n when 2\r\n # at line 1:15: NUMBER\r\n number!\r\n\r\n when 3\r\n # at line 1:22: SPACE\r\n space!\r\n\r\n end\r\n end", "def token_type; end", "def find_token\n shift_token || find_regex_token || find_string_token\n end", "def get_token\n @tokenbuf << read_token if @tokenbuf.length == 0\n return @tokenbuf.shift\n end", "def get_token\n column, line = @column, @line\n while true\n get_chars if @buf.empty? 
and not @eof\n return [:eof, nil, line, column] if @eof and @buf.empty?\n case @buf\n when /\\A\\(/\n eat(1)\n return [:open, nil, line, column]\n when /\\A\\)/\n eat(1)\n return [:close, nil, line, column]\n when /\\A\\[\\]=/\n eat(3)\n return [:expr, :\"[]=\", line, column]\n when /\\A\\[\\](.*)/m\n # Can be partial []=\n if ($1 == \"\") and not @eof\n get_chars\n redo\n end\n eat(2)\n return [:expr, :\"[]\", line, column]\n when /\\A\\[(.*)/m\n # Can be partial [] or []=\n if ($1 == \"\") and not @eof\n get_chars\n redo\n end\n eat(1)\n return [:sqopen, nil, line, column]\n when /\\A\\]/\n eat(1)\n return [:sqclose, nil, line, column]\n when /\\A\\'/\n eat(1)\n return [:quote, nil, line, column]\n when /\\A\\`/ # `\n eat(1)\n return [:quasiquote, nil, line, column]\n when /\\A\\,@/\n eat(2)\n return [:\"unquote-splicing\", nil, line, column]\n when /\\A\\,(.?)/m\n # Possible begin of ,@\n if $1 == \"\" and not @eof\n get_chars\n redo\n else\n eat(1)\n return [:unquote, nil, line, column]\n end\n when /\\A([ \\t\\r\\n]+)/\n eat($1.size)\n column, line = @column, @line\n redo\n when /\\A(#!.*\\n)/\n eat($1.size)\n column, line = @column, @line\n redo\n when /\\A(;.*\\n)/\n eat($1.size)\n column, line = @column, @line\n redo\n when /\\A;/m\n # Partial COMMENT\n if @eof\n return\n else\n get_chars\n redo\n end\n when /\\A#!/m\n # Partial SHEBANG\n if @eof\n return\n else\n get_chars\n redo\n end\n when /\\A#t/\n eat(2)\n return [:expr, :true, line, column]\n when /\\A#f/\n eat(2)\n return [:expr, :false, line, column]\n when /\\A#\\Z/m\n # Partial SHEBANG or #T or #F\n unless @eof\n get_chars\n redo\n end\n when /\\A([+\\-]?[0-9]+(?:(?:\\.[0-9]+)?[eE][+\\-]?[0-9]+|\\.[0-9]+))(.?)/m\n # Possible FLOAT\n # Partial FLOAT also matches, so continue if possible\n s, c = $1, $2\n if (c == \"\" or c =~ /\\A[eE]/) and not @eof\n get_chars\n redo\n else\n eat(s.size)\n return [:expr, eval(s), line, column]\n end\n when /\\A([+\\-]?(?:[1-9][0-9]*|0x[0-9a-fA-F]+|0b[01]+|0o[0-7]+|0[0-7]+|0))(.?)/m\n # Possible INT\n # Partial INT also matches, so continue if possible\n # Partial FLOAT also matches, so handle it\n s, c = $1, $2\n if (c == \"\" or c =~ /\\A[.eExbo]/) and not @eof\n get_chars\n redo\n else\n eat(s.size)\n return [:expr, eval(s), line, column]\n end\n when /\\A([a-zA-Z!$%&*+\\-.:<=>?@^_~][0-9a-zA-Z!$%&*+\\-.:<=>?@^_~]*)(.?)/m\n # Possible ID\n # Partial ID also matches, so continue if possible\n if $2 == \"\" and not @eof\n get_chars\n redo\n else\n eat($1.size)\n s = $1.to_sym\n stt = Hash.new{|ht,k| k}.merge({ :\"..\" => :dotdot, :\"...\" => :dotdotdot })\n return [:expr, stt[s], line, column]\n end\n when /\\A(\"(?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*\")/\n eat($1.size)\n return [:expr, eval($1), line, column]\n when /\\A((\"(?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*)#\\{)/\n eat($1.size)\n return [:istr_beg, eval($2+'\"'), line, column]\n when /\\A(\\}((?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*\"))/\n eat($1.size)\n return [:istr_end, eval('\"'+$2), line, column]\n when /\\A(\\}((?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*)#\\{)/\n eat($1.size)\n return [:istr_mid, eval('\"'+$2+'\"'), line, column]\n when /\\A\"/ # \"\n # Possible partial string/istr_beg\n if @eof\n raise \"EOF inside string: #{@buf}\"\n else\n get_chars\n redo\n end\n when /\\A\\}/ # \"\n # Possible partial istr_mid/istr_end\n if @eof\n raise \"EOF inside interpolated string: #{@buf}\"\n else\n get_chars\n redo\n end\n when /\\A(\\/(?:[^\\/\\\\]|\\\\.)*\\/[mix]*)(.?)/\n if $2 == \"\" and not @eof\n get_chars\n redo\n else\n 
eat($1.size)\n return [:expr, eval($1), line, column]\n end\n when /\\A\\//\n # Possible partial regexp\n if @eof\n raise \"EOF inside interpolated string: #{@buf}\"\n else\n get_chars\n redo\n end\n else\n raise \"Not sure what to do with: #{@buf}\"\n end\n end \n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? || token.kind_of?(String) \n @pointer += 1\n token\n end", "def literal?(node); end", "def convert_buffer_to_token(token_type); end", "def punctuator\n code0 = @codes[@pos]\n code1 = @codes[@pos+1]\n code2 = @codes[@pos+2]\n code3 = @codes[@pos+3]\n if false\n elsif code0 == 0x28 # (\n @pos += 1 # (\n return ECMA262::PUNC_LPARENTHESIS\n elsif code0 == 0x29 # )\n @pos += 1 # )\n return ECMA262::PUNC_RPARENTHESIS\n elsif code0 == 0x7b # {\n @pos += 1 # {\n return ECMA262::PUNC_LCURLYBRAC\n elsif code0 == 0x7d # }\n @pos += 1 # }\n return ECMA262::PUNC_RCURLYBRAC\n elsif code0 == 0x3b # ;\n @pos += 1 # ;\n return ECMA262::PUNC_SEMICOLON\n elsif code0 == 0x3d # =\n if code1 == 0x3d and code2 == 0x3d # ===\n @pos += 3\n return ECMA262::PUNC_SEQ\n end\n if code1 == 0x3d # ==\n @pos += 2\n return ECMA262::PUNC_EQ\n end\n @pos += 1 # =\n return ECMA262::PUNC_ASSIGN\n elsif code0 == 0x21 # !\n if code1 == 0x3d and code2 == 0x3d # !==\n @pos += 3\n return ECMA262::PUNC_SNEQ\n end\n if code1 == 0x3d # !=\n @pos += 2\n return ECMA262::PUNC_NEQ\n end\n @pos += 1 # !\n return ECMA262::PUNC_LNOT\n elsif code0 == 0x25 # %\n if code1 == 0x3d # %=\n @pos += 2\n return ECMA262::PUNC_MODASSIGN\n end\n @pos += 1 # %\n return ECMA262::PUNC_MOD\n elsif code0 == 0x26 # &\n if code1 == 0x3d # &=\n @pos += 2\n return ECMA262::PUNC_ANDASSIGN\n end\n if code1 == 0x26 # &&\n @pos += 2\n return ECMA262::PUNC_LAND\n end\n @pos += 1 # &\n return ECMA262::PUNC_AND\n elsif code0 == 0x2a # *\n if code1 == 0x3d # *=\n @pos += 2\n return ECMA262::PUNC_MULASSIGN\n end\n @pos += 1 # *\n return ECMA262::PUNC_MUL\n elsif code0 == 0x2b # +\n if code1 == 0x3d # +=\n @pos += 2\n return ECMA262::PUNC_ADDASSIGN\n end\n if code1 == 0x2b # ++\n @pos += 2\n return ECMA262::PUNC_INC\n end\n @pos += 1 # +\n return ECMA262::PUNC_ADD\n elsif code0 == 0x2c # ,\n @pos += 1 # ,\n return ECMA262::PUNC_COMMA\n elsif code0 == 0x2d # -\n if code1 == 0x3d # -=\n @pos += 2\n return ECMA262::PUNC_SUBASSIGN\n end\n if code1 == 0x2d # --\n @pos += 2\n return ECMA262::PUNC_DEC\n end\n @pos += 1 # -\n return ECMA262::PUNC_SUB\n elsif code0 == 0x2e # .\n @pos += 1 # .\n return ECMA262::PUNC_PERIOD\n elsif code0 == 0x3a # :\n @pos += 1 # :\n return ECMA262::PUNC_COLON\n elsif code0 == 0x3c # <\n if code1 == 0x3d # <=\n @pos += 2\n return ECMA262::PUNC_LTEQ\n end\n if code1 == 0x3c and code2 == 0x3d # <<=\n @pos += 3\n return ECMA262::PUNC_LSHIFTASSIGN\n end\n if code1 == 0x3c # <<\n @pos += 2\n return ECMA262::PUNC_LSHIFT\n end\n @pos += 1 # <\n return ECMA262::PUNC_LT\n elsif code0 == 0x3e # >\n if code1 == 0x3e and code2 == 0x3e and code3 == 0x3d # >>>=\n @pos += 4\n return ECMA262::PUNC_URSHIFTASSIGN\n end\n if code1 == 0x3e and code2 == 0x3e # >>>\n @pos += 3\n return ECMA262::PUNC_URSHIFT\n end\n if code1 == 0x3e and code2 == 0x3d # >>=\n @pos += 3\n return ECMA262::PUNC_RSHIFTASSIGN\n end\n if code1 == 0x3e # >>\n @pos += 2\n return ECMA262::PUNC_RSHIFT\n end\n if code1 == 0x3d # >=\n @pos += 2\n return ECMA262::PUNC_GTEQ\n end\n @pos += 1 # >\n return ECMA262::PUNC_GT\n elsif code0 == 0x3f # ?\n @pos += 1 # ?\n return ECMA262::PUNC_CONDIF\n elsif code0 == 0x5b # [\n @pos += 1 # [\n return 
ECMA262::PUNC_LSQBRAC\n elsif code0 == 0x5d # ]\n @pos += 1 # ]\n return ECMA262::PUNC_RSQBRAC\n elsif code0 == 0x5e # ^\n if code1 == 0x3d # ^=\n @pos += 2\n return ECMA262::PUNC_XORASSIGN\n end\n @pos += 1 # ^\n return ECMA262::PUNC_XOR\n elsif code0 == 0x7c # |\n if code1 == 0x7c # ||\n @pos += 2\n return ECMA262::PUNC_LOR\n end\n if code1 == 0x3d # |=\n @pos += 2\n return ECMA262::PUNC_ORASSIGN\n end\n @pos += 1 # |\n return ECMA262::PUNC_OR\n elsif code0 == 0x7e # ~\n @pos += 1 # ~\n return ECMA262::PUNC_NOT\n end\n nil\n end", "def next_token; @stack.shift; end", "def token_spec\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 3)\n return_value = TokenSpecReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n lit = nil\n __TOKEN_REF16__ = nil\n char_literal17 = nil\n char_literal18 = nil\n\n tree_for_lit = nil\n tree_for_TOKEN_REF16 = nil\n tree_for_char_literal17 = nil\n tree_for_char_literal18 = nil\n stream_STRING_LITERAL = ANTLR3::AST::RewriteRuleTokenStream.new(@adaptor, \"token STRING_LITERAL\")\n stream_T__71 = ANTLR3::AST::RewriteRuleTokenStream.new(@adaptor, \"token T__71\")\n stream_CHAR_LITERAL = ANTLR3::AST::RewriteRuleTokenStream.new(@adaptor, \"token CHAR_LITERAL\")\n stream_LABEL_ASSIGN = ANTLR3::AST::RewriteRuleTokenStream.new(@adaptor, \"token LABEL_ASSIGN\")\n stream_TOKEN_REF = ANTLR3::AST::RewriteRuleTokenStream.new(@adaptor, \"token TOKEN_REF\")\n\n begin\n # at line 114:4: TOKEN_REF ( '=' (lit= STRING_LITERAL | lit= CHAR_LITERAL ) -> ^( '=' TOKEN_REF $lit) | -> TOKEN_REF ) ';'\n __TOKEN_REF16__ = match(TOKEN_REF, TOKENS_FOLLOWING_TOKEN_REF_IN_token_spec_492) \n if @state.backtracking == 0\n stream_TOKEN_REF.add(__TOKEN_REF16__)\n end\n # at line 115:3: ( '=' (lit= STRING_LITERAL | lit= CHAR_LITERAL ) -> ^( '=' TOKEN_REF $lit) | -> TOKEN_REF )\n alt_10 = 2\n look_10_0 = @input.peek(1)\n\n if (look_10_0 == LABEL_ASSIGN) \n alt_10 = 1\n elsif (look_10_0 == T__71) \n alt_10 = 2\n else\n @state.backtracking > 0 and raise(ANTLR3::Error::BacktrackingFailed)\n nvae = NoViableAlternative(\"\", 10, 0)\n raise nvae\n end\n case alt_10\n when 1\n # at line 115:5: '=' (lit= STRING_LITERAL | lit= CHAR_LITERAL )\n char_literal17 = match(LABEL_ASSIGN, TOKENS_FOLLOWING_LABEL_ASSIGN_IN_token_spec_498) \n if @state.backtracking == 0\n stream_LABEL_ASSIGN.add(char_literal17)\n end\n # at line 115:9: (lit= STRING_LITERAL | lit= CHAR_LITERAL )\n alt_9 = 2\n look_9_0 = @input.peek(1)\n\n if (look_9_0 == STRING_LITERAL) \n alt_9 = 1\n elsif (look_9_0 == CHAR_LITERAL) \n alt_9 = 2\n else\n @state.backtracking > 0 and raise(ANTLR3::Error::BacktrackingFailed)\n nvae = NoViableAlternative(\"\", 9, 0)\n raise nvae\n end\n case alt_9\n when 1\n # at line 115:10: lit= STRING_LITERAL\n lit = match(STRING_LITERAL, TOKENS_FOLLOWING_STRING_LITERAL_IN_token_spec_503) \n if @state.backtracking == 0\n stream_STRING_LITERAL.add(lit)\n end\n\n when 2\n # at line 115:29: lit= CHAR_LITERAL\n lit = match(CHAR_LITERAL, TOKENS_FOLLOWING_CHAR_LITERAL_IN_token_spec_507) \n if @state.backtracking == 0\n stream_CHAR_LITERAL.add(lit)\n end\n\n end\n # AST Rewrite\n # elements: lit, LABEL_ASSIGN, TOKEN_REF\n # token labels: lit\n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_lit = token_stream(\"token lit\", lit)\n stream_return_value = return_value ? 
subtree_stream(\"rule return_value\", return_value.tree) : subtree_stream(\"token return_value\")\n\n root_0 = @adaptor.create_flat_list!\n # 115:47: -> ^( '=' TOKEN_REF $lit)\n # at line 115:50: ^( '=' TOKEN_REF $lit)\n root_1 = @adaptor.create_flat_list!\n root_1 = @adaptor.become_root(stream_LABEL_ASSIGN.next_node, root_1)\n\n @adaptor.add_child(root_1, stream_TOKEN_REF.next_node)\n @adaptor.add_child(root_1, stream_lit.next_node)\n\n @adaptor.add_child(root_0, root_1)\n\n\n\n return_value.tree = root_0\n\n end\n when 2\n # at line 116:16: \n # AST Rewrite\n # elements: TOKEN_REF\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream(\"rule return_value\", return_value.tree) : subtree_stream(\"token return_value\")\n\n root_0 = @adaptor.create_flat_list!\n # 116:16: -> TOKEN_REF\n @adaptor.add_child(root_0, stream_TOKEN_REF.next_node)\n\n\n\n return_value.tree = root_0\n\n end\n end\n char_literal18 = match(T__71, TOKENS_FOLLOWING_T__71_IN_token_spec_546) \n if @state.backtracking == 0\n stream_T__71.add(char_literal18)\n end\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look(-1)\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing(root_0)\n @adaptor.set_token_boundaries(return_value.tree, return_value.start, return_value.stop)\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node!(@input, return_value.start, @input.look(-1), re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 3)\n\n end\n \n return return_value\n end", "def next_token\n @current_token = @lexer.next_token\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, 
TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def octal_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37\n @pos += 1\n pos0 = @pos\n while code = @codes[@pos] and code >= 0x30 and code <= 0x37\n @pos += 1\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(8))\n end\n else\n nil\n end\n end", "def check?(token_type)\n return false if at_end?\n return peek.type == token_type\n end", "def getTokenKind()\n $tokens.at(0)\nend", "def token_spec\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 3 )\n return_value = TokenSpecReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n lit = nil\n __TOKEN_REF16__ = nil\n char_literal17 = nil\n char_literal18 = nil\n\n tree_for_lit = nil\n tree_for_TOKEN_REF16 = nil\n tree_for_char_literal17 = nil\n tree_for_char_literal18 = nil\n stream_STRING_LITERAL = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token STRING_LITERAL\" )\n stream_T__71 = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token T__71\" )\n stream_CHAR_LITERAL = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token CHAR_LITERAL\" )\n stream_LABEL_ASSIGN = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token LABEL_ASSIGN\" )\n stream_TOKEN_REF = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token TOKEN_REF\" )\n\n begin\n # at line 105:4: TOKEN_REF ( '=' (lit= STRING_LITERAL | lit= CHAR_LITERAL ) -> ^( '=' TOKEN_REF $lit) | -> TOKEN_REF ) ';'\n __TOKEN_REF16__ = match( TOKEN_REF, TOKENS_FOLLOWING_TOKEN_REF_IN_token_spec_487 )\n if @state.backtracking == 0\n stream_TOKEN_REF.add( __TOKEN_REF16__ )\n end\n # at line 106:3: ( '=' (lit= STRING_LITERAL | lit= CHAR_LITERAL ) -> ^( '=' TOKEN_REF $lit) | -> TOKEN_REF )\n alt_10 = 2\n look_10_0 = @input.peek( 1 )\n\n if ( look_10_0 == LABEL_ASSIGN )\n alt_10 = 1\n elsif ( look_10_0 == T__71 )\n alt_10 = 2\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n raise NoViableAlternative( \"\", 10, 0 )\n end\n case alt_10\n when 1\n # at line 106:5: '=' (lit= STRING_LITERAL | lit= CHAR_LITERAL )\n char_literal17 = match( LABEL_ASSIGN, TOKENS_FOLLOWING_LABEL_ASSIGN_IN_token_spec_493 )\n if @state.backtracking == 0\n stream_LABEL_ASSIGN.add( char_literal17 )\n end\n # at line 106:9: (lit= STRING_LITERAL | lit= CHAR_LITERAL )\n alt_9 = 2\n look_9_0 = @input.peek( 1 )\n\n if ( look_9_0 == STRING_LITERAL )\n alt_9 = 1\n elsif ( look_9_0 == CHAR_LITERAL )\n alt_9 = 2\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n raise NoViableAlternative( \"\", 9, 0 )\n end\n case alt_9\n when 1\n # at line 106:10: lit= STRING_LITERAL\n lit = match( STRING_LITERAL, TOKENS_FOLLOWING_STRING_LITERAL_IN_token_spec_498 )\n if @state.backtracking == 0\n stream_STRING_LITERAL.add( lit )\n end\n\n when 2\n # at 
line 106:29: lit= CHAR_LITERAL\n lit = match( CHAR_LITERAL, TOKENS_FOLLOWING_CHAR_LITERAL_IN_token_spec_502 )\n if @state.backtracking == 0\n stream_CHAR_LITERAL.add( lit )\n end\n\n end\n # AST Rewrite\n # elements: TOKEN_REF, lit, LABEL_ASSIGN\n # token labels: lit\n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_lit = token_stream( \"token lit\", lit )\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 106:47: -> ^( '=' TOKEN_REF $lit)\n # at line 106:50: ^( '=' TOKEN_REF $lit)\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_LABEL_ASSIGN.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_TOKEN_REF.next_node )\n @adaptor.add_child( root_1, stream_lit.next_node )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 2\n # at line 107:16: \n # AST Rewrite\n # elements: TOKEN_REF\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 107:16: -> TOKEN_REF\n @adaptor.add_child( root_0, stream_TOKEN_REF.next_node )\n\n\n\n return_value.tree = root_0\n\n end\n end\n char_literal18 = match( T__71, TOKENS_FOLLOWING_T__71_IN_token_spec_541 )\n if @state.backtracking == 0\n stream_T__71.add( char_literal18 )\n end\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 3 )\n\n end\n \n return return_value\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when text = ss.scan(/#{DIGIT}/) then\n action { [:DIGIT, text.to_i] }\n when text = ss.scan(/#{ADDITION}/) then\n action { [:ADDITION, text] }\n when text = ss.scan(/#{SUBSTRACTION}/) then\n action { [:SUBSTRACTION, text] }\n when text = ss.scan(/#{MULTIPLICATION}/) then\n action { [:MULTIPLICATION, text] }\n when text = ss.scan(/#{DIVISION}/) then\n action { [:DIVISION, text] }\n when text = ss.scan(/#{OPENING_PARANTHESIS}/) then\n action { [:OPENING_PARANTHESIS, text] }\n when text = ss.scan(/#{CLOSING_PARANTHESIS}/) then\n action { [:CLOSING_PARANTHESIS, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def tokenize (p_token, type, lineno, pos)\n\t\n\tif type == \"op\"\n\t\treturn op_tokenize(p_token, lineno, pos)\n\t\n\telsif type == \"character\"\n\t\treturn char_tokenize(p_token, lineno, pos)\n\t\n\telsif type == \"string\"\n\t\treturn string_tokenize(p_token, lineno, pos)\n\t\n\telsif type == \"digit\"\n\t\treturn digit_tokenize(p_token, lineno, pos)\n\t\n\telse\n\t\t# should create an error here, just for thoroughness\n\tend\nend", "def consume\n return nil if @s.eos?\n\n @s.mark\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string('\"')\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?\n create_token(:hash,\n :type => start_identifier? ? :id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string(\"'\")\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n # NON-STANDARD: IE * hack\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n elsif start_identifier?(char + @s.peek(2))\n @s.reconsume\n consume_ident\n elsif @s.peek(2) == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :/\n if @s.peek == '*'\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n text = @s.consume_rest\n end\n\n if @options[:preserve_comments]\n create_token(:comment, :value => text)\n else\n consume\n end\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?(char + @s.peek)\n @s.reconsume\n consume_ident\n else\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n 
create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def parse\n token = @tokens.next\n\n return token if [:nil, :true, :false].include? token\n\n obj = [token]\n\n rest =\n case token\n when :array then parse_array\n when :bignum then parse_bignum\n when :class, :module then parse_class\n when :data then parse_data\n when :extended then parse_extended\n when :fixnum, :link, :symbol_link then [@tokens.next]\n when :float then parse_float\n when :hash then parse_hash\n when :hash_default then parse_hash_def\n when :object then parse_object\n when :regexp then parse_regexp\n when :string then parse_string\n when :struct then parse_struct\n when :symbol then parse_symbol\n when :user_class then parse_extended\n when :user_defined then parse_user_defined\n when :user_marshal then parse_user_marshal\n when :instance_variables then\n [parse].concat parse_instance_variables\n when :module_old then\n obj[0] = :module\n parse_class\n else\n raise Marshal::Structure::Error, \"bug: unknown token #{token.inspect}\"\n end\n\n obj.concat rest\n rescue Marshal::Structure::EndOfMarshal\n raise ArgumentError, 'marshal data too short'\n end", "def op_tokenize (p_token, lineno, pos)\n\n\tcase p_token\n\twhen \"=\"\n\t\treturn Token.new(\"T_ASSIGNMENT\", p_token, lineno, pos)\n\twhen \"{\"\n\t\treturn Token.new(\"T_LBRACE\", p_token, lineno, pos)\n\twhen \"}\"\n\t\treturn Token.new(\"T_RBRACE\", p_token, lineno, pos)\n\twhen \"(\"\n\t\treturn Token.new(\"T_LPAREN\", p_token, lineno, pos)\n\twhen \")\"\n\t\treturn Token.new(\"T_RPAREN\", p_token, lineno, pos)\n\twhen \"\\\"\"\n\t\treturn Token.new(\"T_QUOTE\", p_token, lineno, pos)\n\twhen \"==\"\n\t\treturn Token.new(\"T_BOOLOP\", p_token, lineno, pos)\n\twhen \"!=\"\n\t\treturn Token.new(\"T_BOOLOP\", p_token, lineno, pos)\n\twhen \"+\"\n\t\treturn Token.new(\"T_PLUS\", p_token, lineno, pos)\n\twhen \"$\"\n\t\treturn Token.new(\"T_EOFSIGN\", p_token, lineno, pos)\n\telse\n\t\traise UnknownSymbolError.new(p_token, lineno, pos)\n\tend\nend", "def get_tokenize\n @lexeme = nil\n loop {\n case @string\n\t when nil\t\t# the end\n\t @sym = nil\n\t break\n when \"\" # the end\n @sym = nil\n break\n when /\\A[\\r\\n\\t ]+/m\t# skip whitespace\n @string = $'\n when /\\A\\(/m # skip comment\n comment\n when /\\A\"\"/ # skip empty quoted text\n @string = $'\n when /\\A[\\w!$%&\\'*+\\/=?^\\`{\\}|~#-]+/m\n @string = $'\n @sym = SYM_ATOM\n break\n when /\\A\"(.*?([^\\\\]|\\\\\\\\))\"/m\n @string = $'\n @sym = SYM_QTEXT\n @lexeme = $1.gsub(/\\\\(.)/, '\\1')\n break\n when /\\A</\n @string = $'\n @sym = SYM_LESS_THAN\n break\n when /\\A>/\n @string = $'\n @sym = SYM_GREATER_THAN\n break\n when /\\A@/\n @string = $'\n @sym = SYM_AT_SIGN\n break\n when /\\A,/\n @string = $'\n @sym = SYM_COMMA\n break\n when /\\A:/\n @string = $'\n @sym = SYM_COLON\n break\n when /\\A;/\n @string = $'\n @sym = SYM_SEMI_COLON\n break\n when /\\A\\./\n @string = $'\n @sym = SYM_PERIOD\n break\n\t 
when /\\A(\\[.*?([^\\\\]|\\\\\\\\)\\])/m\n\t @string = $'\n\t @sym = SYM_DOMAIN_LITERAL\n\t @lexeme = $1.gsub(/(^|[^\\\\])[\\r\\n\\t ]+/, '\\1').gsub(/\\\\(.)/, '\\1')\n\t break\n when /\\A[\\200-\\377\\w!$%&\\'*+\\/=?^\\`{\\}|~#-]+/nm\n # This is just like SYM_ATOM, but includes all characters\n # with high bits. This is so we can allow such tokens in\n # the display name portion of an address even though it\n # violates the RFCs.\n @string = $'\n @sym = SYM_ATOM_NON_ASCII\n break\n when /\\A./\n @string = $'\t# garbage\n\t error('garbage character in string')\n else\n raise \"internal error, @string is #{@string.inspect}\"\n end\n }\n if @sym\n @lexeme ||= $&\n end\n end", "def token(type, text)\n value = case type\n when :INTEGER then text.to_i\n when :REAL then text.to_f\n when :STRING then text[1..-2]\n else text\n end\n\n token = Token.new(value, text, @line, @column)\n\n update_counter(token.source)\n\n [type, token]\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def fetch_token(source_part)\n chars = source_part.split(//)\n if (['(', ')'].include?(chars[0]))\n [chars[0], chars[1..-1].join]\n elsif chars[0] == '\"'\n end_quot_pos = chars[1..-1].index {|c| c == '\"'}\n [chars[1..end_quot_pos].join, chars[end_quot_pos+2..-1].join]\n elsif chars[0] == '-'\n chars = chars[1..-1].join.strip.split(//)\n num_str = \"\"\n chars.each {|c|\n if (%w(0 1 2 3 4 5 6 7 8 9)).include?(c)\n num_str << c\n elsif ['(', ')', ' '].include?(c)\n break\n else\n raise \"unexpected token '#{c}'\"\n end\n }\n if num_str.blank?\n raise \"invalid token : -\"\n end\n\n [- num_str.to_i, chars[num_str.length..-1].join]\n elsif %w(0 1 2 3 4 5 6 7 8 9).include?(chars[0])\n num_str = \"\"\n chars.each {|c|\n if (%w(0 1 2 3 4 5 6 7 8 9)).include?(c)\n num_str << c\n elsif ['(', ')', ' '].include?(c)\n break\n else\n raise \"unexpected token '#{c}'\"\n end\n }\n\n [num_str.to_i, chars[num_str.length..-1].join]\n else\n\n token = \"\"\n chars.each {|c|\n if ['(', ')', ' '].include?(c)\n break\n else\n token << c\n end\n }\n\n [token.to_sym, chars[token.length..-1].join]\n end\nend", "def get_token\n return nil if @token_index >= @arguments.size\n\n begin\n case chr(@arguments[@token_index])\n when \"[\"\n return \"statement\", gen_substatement\n\n when \"]\"\n return \"]\"\n\n when \"(\"\n return \"(\", \"(\"\n\n when \")\"\n return \")\", \")\"\n\n when \"n\"\n if (chr(@arguments[@token_index + 1]) == \"o\") && (chr(@arguments[@token_index + 2]) == \"t\") && ((chr(@arguments[@token_index + 3]) == \" \") || (chr(@arguments[@token_index + 3]) == \"(\"))\n @token_index += 
2\n return \"not\", \"not\"\n else\n gen_statement\n end\n\n when \"!\"\n return \"not\", \"not\"\n\n when \"a\"\n if (chr(@arguments[@token_index + 1]) == \"n\") && (chr(@arguments[@token_index + 2]) == \"d\") && ((chr(@arguments[@token_index + 3]) == \" \") || (chr(@arguments[@token_index + 3]) == \"(\"))\n @token_index += 2\n return \"and\", \"and\"\n else\n gen_statement\n end\n\n when \"&\"\n if chr(@arguments[@token_index + 1]) == \"&\"\n @token_index += 1\n return \"and\", \"and\"\n else\n gen_statement\n end\n\n when \"o\"\n if (chr(@arguments[@token_index + 1]) == \"r\") && ((chr(@arguments[@token_index + 2]) == \" \") || (chr(@arguments[@token_index + 2]) == \"(\"))\n @token_index += 1\n return \"or\", \"or\"\n else\n gen_statement\n end\n\n when \"|\"\n if chr(@arguments[@token_index + 1]) == \"|\"\n @token_index += 1\n return \"or\", \"or\"\n else\n gen_statement\n end\n\n when \"+\"\n value = \"\"\n i = @token_index + 1\n\n begin\n value += chr(@arguments[i])\n i += 1\n end until (i >= @arguments.size) || (chr(@arguments[i]) =~ /\\s|\\)/)\n\n @token_index = i - 1\n return \"+\", value\n\n when \"-\"\n value = \"\"\n i = @token_index + 1\n\n begin\n value += chr(@arguments[i])\n i += 1\n end until (i >= @arguments.size) || (chr(@arguments[i]) =~ /\\s|\\)/)\n\n @token_index = i - 1\n return \"-\", value\n\n when \" \"\n return \" \", \" \"\n\n else\n gen_statement\n end\n end\n rescue NoMethodError\n raise \"Error. Expression cannot be parsed.\"\n end", "def current_token\n @tokens[@token_index]\n end", "def racc_read_token(t, tok, val); end", "def tokenGetter\n\tif @tokens.length > 0\n\t\tvalue_to_check = @tokens.shift()[0]\n\telse\n\t\treturn [:EOF, \"EOF\"]\n\tend\n\t\n\t$lexemeVal\n\tif value_to_check =~ /^\\+|^\\-/\n\t\t $lexemeVal = :ADDOP\n\telsif value_to_check =~ /^\\*/\n\t\t $lexemeVal = :MUL\n\telsif value_to_check =~ /^\\(/\n\t\t $lexemeVal = :LPAR\n\telsif value_to_check =~ /^\\)/\n\t\t $lexemeVal = :RPAR\n\telsif value_to_check =~ /^;/\n\t\t $lexemeVal = :SEMI\n\telsif value_to_check =~ /^<=|^=/\n\t\t $lexemeVal = :RELOP\n\telsif value_to_check =~ /^:=/\n\t\t $lexemeVal = :ASSIGN\n\telsif value_to_check =~ /^true|^false/\n\t\t $lexemeVal = :BOOL\n\telsif value_to_check =~ /^not/\n\t\t $lexemeVal = :NOT\n\telsif value_to_check =~ /^and/\n\t\t $lexemeVal = :AND\n\telsif value_to_check =~ /^skip/\n\t\t $lexemeVal = :SKIP\n\telsif value_to_check =~ /^if/\n\t\t $lexemeVal = :IF\n\telsif value_to_check =~ /^then/\n\t\t $lexemeVal = :THEN\n\telsif value_to_check =~ /^else/\n\t\t $lexemeVal = :ELSE\n\telsif value_to_check =~ /^do/\n\t\t $lexemeVal = :DO\n\telsif value_to_check =~ /^while/\n\t\t $lexemeVal = :WHILE\n\telsif value_to_check =~ /^[0-9]+/\n\t\t $lexemeVal = :NUM\n\telsif value_to_check =~ /^[a-zA-Z_][a-zA-Z0-9_]*/\n\t\t $lexemeVal = :ID\n\tend\n\treturn [$lexemeVal, value_to_check]\nend", "def if?(token_class = nil, &block)\n block ||= lambda do |token|\n token.class == token_class\n end\n if block.call(@token)\n @token = @lexer.next\n end\n end", "def peek_token\n token = @tokens.first || []\n p :peek => token if @debug\n token\n end", "def peek_token\n token = @tokens.first || []\n p :peek => token if @debug\n token\n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def read_next_token(token_class)\n if @next_token\n return @next_token\n else\n # check for a match on the specified class first\n if match(token_class)\n return @next_token\n else\n # now check all the tokens for a match\n 
Taxonifi::Splitter::Tokens.send(@token_list).each {|t|\n return @next_token if match(t)\n }\n end\n # no match, either end of string or lex-error\n if @input != ''\n raise(Taxonifi::Splitter::SplitterError, \"Lexer Error, unknown token at |#{@input[0..20]}...\", caller)\n else\n return nil\n end\n end\n end", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def next_token\n\t\t@token = @input.next_token\n\tend", "def next_token\n tokens.shift\n end", "def next_token\n \n # Early return if there is nothing to be read. This means we've reached the end of the file.\n \n unless @file[@pos]\n return nil\n end\n \n # This is the token that will be returned.\n token = Compiler::Token.new\n \n # Initializes a new instance of the automaton.\n automaton = Automaton.new\n \n # Will be set inside the loop, if necessary.\n increment_next = false\n \n # Will be set inside the loop. Marks whether we've reached the end of the file.\n eof = false\n \n # Build a new token while we don't have a new word yet and isn't in the failed state\n while ((automaton.state != :A || automaton.word.empty?) && automaton.state != :failed)\n \n # The next input for the automaton\n char = @file[@pos]\n \n if char\n \n # Moves the pointer to the next char\n @pos += 1\n \n automaton.transition(char)\n \n # While the automaton hasn't started to build a new word yet, increments the line and column numbers.\n # In this phase, we're just skipping blank characters\n if automaton.word.empty?\n if increment_next\n if char == \"\\n\"\n increment_next = true\n else\n increment_next = false\n end\n @line += 1\n @column = 0\n elsif char == \"\\n\"\n @column += 1\n increment_next = true\n else\n @column += 1\n end\n end\n \n else\n eof = true\n puts \"breaking\"\n break\n end\n end\n \n \n \n if eof\n automaton.transition(\"\\n\")\n else\n @pos -= 1\n end\n \n if (automaton.type == :identifier) && (Compiler.reserved_words.is_reserved?(automaton.word))\n token.type = :reserved_word\n else\n token.type = automaton.type\n end\n \n token.value = automaton.word\n token.line = @line\n token.column = @column\n \n return token\n \n end", "def token(content, kind); end", "def next_token\n\t\[email protected]_token\n\tend", "def token; end", "def token; end", "def token; end", "def token; end", "def token; end", "def token; end", "def tokenize_operator(&block) # :yields: SQLTree::Token\n operator = current_char\n if operator == '-' && /[\\d\\.]/ =~ peek_char\n tokenize_number(&block)\n else\n operator << next_char if SQLTree::Token::OPERATORS_HASH.has_key?(operator + peek_char)\n operator_class = SQLTree::Token.const_get(SQLTree::Token::OPERATORS_HASH[operator].to_s.upcase)\n handle_token(operator_class.new(operator), &block)\n end\n end", "def run(source, until_token = :invalid, token_count = nil)\n @at_end = false\n @source = source\n @reader = source.each_char\n\n read_next()\n\n while token_count == nil || token_count > 0\n skip_whitespace()\n current = @marker.character\n break unless current\n\n token = Token.new\n token.kind = :invalid\n token.from = @marker.source_index\n token.position = @marker.position.dup\n\n case current\n when ?\", ?'\n read_string(token)\n\n when ?0\n case peek_next()\n when ?x, ?X, ?b, ?B then read_base_number(token)\n else read_number(token)\n end\n\n when ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_number(token)\n\n # dot, double dot, triple dot, and floats beginning with a dot\n when ?.\n token.kind = :dot\n case peek_next()\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9 
then read_number(token)\n when ?.\n read_next()\n token.kind = :double_dot\n\n if peek_next() == ?.\n read_next()\n token.kind = :triple_dot\n end\n\n token.value = Token::DESCRIPTORS[token.kind]\n else\n token.value = Token::DESCRIPTORS[token.kind]\n end\n\n when ?_, ?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j, ?k, ?l, ?m, ?n, ?o, ?p,\n ?q, ?r, ?s, ?t, ?u, ?v, ?w, ?x, ?y, ?z, ?A, ?B, ?C, ?D, ?E, ?F, ?G, ?H,\n ?I, ?J, ?K, ?L, ?M, ?N, ?O, ?P, ?Q, ?R, ?S, ?T, ?U, ?V, ?W, ?X, ?Y, ?Z\n read_word(token)\n\n when ?\\n\n token.value = current\n token.kind = :newline\n\n when ??, ?#, ?@, ?$, ?%, ?(, ?), ?[, ?], ?{, ?}, ?^, ?~, ?`, ?\\\\, ?,, ?;\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?=, ?|, ?&, ?:, ?+, ?*\n current << read_next() if peek_next() == current\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?!\n current << read_next() if peek_next() == ?=\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?>, ?<\n case peek_next()\n when ?=, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?-\n case peek_next()\n when ?>, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?/\n case peek_next()\n when ?/ then read_line_comment(token)\n when ?* then read_block_comment(token)\n else\n token.value = Token::DESCRIPTORS[token.kind = :slash]\n read_next()\n end\n\n end # case current\n\n token.to = @marker.source_index\n last_kind = token.kind\n if !(@skip_comments && token.comment?) && !(@skip_newlines && token.newline?)\n if last_kind != :invalid\n @tokens << token\n yield token if block_given?\n else\n raise RuntimeError, \"#{token.position} Invalid token: #{token.inspect}\"\n end\n end\n\n break if until_token == last_kind\n\n read_next()\n token_count -= 1 unless token_count.nil?\n end # while current && token_count > 0\n\n @source = nil\n @reader = nil\n\n self\n end", "def tokenType(token)\n\n tag = case token\n when /^(class|constructor|function|method|field|static|var|int|char|boolean|void|true|false|null|this|let|do|if|else|while|return)$/ then :keyword\n\n when /\\{|\\}|\\(|\\)|\\[|\\]|\\.|\\,|\\;|\\+|\\-|\\*|\\/|\\&|\\||\\<|\\>|\\=|\\~/ then :symbol\n\n when /^\\d+/ then :integerConstant\n when /\\\"/ then :stringConstant\n when /^[^\\d]\\w*/ then :identifier\n else :error\n end\n\n return tag\n\n end", "def next_token\n @tokens.shift\n end", "def next_token\n @state = 1\n value = \"\"\n recovery_data = [0, 0]\n\n while [email protected]?\n char = @stream.read(1)\n next_state = get_next_state(char)\n\n # Move to the next state.\n if next_state\n if recognizable?\n recovery_data = [@state, 0]\n end\n\n value << char\n recovery_data[1] += 1\n @state = next_state\n else\n # Recognise the final token.\n if recognizable?\n @stream.seek(@stream.pos - 1)\n break\n else\n # Recoverable error.\n if recovery_data[0] > 0\n value = recover_from_error!(recovery_data, value)\n break\n # Fatal lexical error.\n else\n raise Bolverk::ASM::LexicalError, \"Disallowed token: #{char} on line #{@stream.line_number}\"\n end\n end\n end\n end\n\n build_token(value)\n end", "def next\n\t\tif @next_token\n\t\t\ttoken = @next_token\n\t\t\t@next_token = nil\n\t\t\treturn token\n\t\telse\n\t\t\ttoken = read_token\n\t\t\treturn token\n\t\tend\n\tend", "def token_pos offset\n [offset - @line_pos, @line]\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space 
== :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def consume\n @current = @tokens[@pos]\n @pos += 1 if @current\n @current\n end", "def friendly_token(length = T.unsafe(nil)); end", "def skip token_type, error = true\n type, = get\n return unless type # end of stream\n return @current_token if token_type == type\n unget\n raise ParseError, \"expected #{token_type} got #{@current_token.inspect}\" if error\n end", "def lex_en_expr_beg; end" ]
[ "0.6609497", "0.6435141", "0.6293766", "0.62336534", "0.61545026", "0.61502826", "0.61282426", "0.6104507", "0.6080465", "0.60489666", "0.604669", "0.60290664", "0.601942", "0.5971353", "0.5968677", "0.58435327", "0.5819494", "0.5810206", "0.5802134", "0.57788855", "0.5751459", "0.57394147", "0.5699651", "0.5695514", "0.5695483", "0.5678967", "0.5669759", "0.5640305", "0.5634484", "0.56333715", "0.55887115", "0.55835956", "0.5583528", "0.5575121", "0.5575121", "0.5575121", "0.55695546", "0.5549067", "0.5546892", "0.55458516", "0.5538177", "0.55346495", "0.55313116", "0.55124176", "0.5510653", "0.55010486", "0.54994553", "0.54979026", "0.5480085", "0.54769987", "0.5469188", "0.5463456", "0.54563594", "0.54527646", "0.54302526", "0.54066277", "0.53958195", "0.53895134", "0.53831404", "0.5381769", "0.5376501", "0.53685856", "0.5366176", "0.5364848", "0.5362861", "0.5360146", "0.53599626", "0.5352648", "0.5351657", "0.535106", "0.535106", "0.53323054", "0.53323054", "0.53323054", "0.53323054", "0.53231585", "0.53225684", "0.5307423", "0.5303949", "0.5297181", "0.52945113", "0.52873063", "0.5281515", "0.5281515", "0.5281515", "0.5281515", "0.5281515", "0.5281515", "0.52651983", "0.5263449", "0.5251384", "0.5237693", "0.52347463", "0.5233358", "0.5225163", "0.5221728", "0.5219397", "0.5212675", "0.52120876", "0.5210649" ]
0.5511946
44
Tests whether the next literal is an IdentifierName. If it is, returns an ECMA262::IdentifierName object and advances the lexical parser position; otherwise returns nil and leaves the position unchanged.
def identifier_name
  # End of input: nothing to read.
  return nil if (code = @codes[@pos]).nil?
  pos0 = @pos # remember the start position (unused in this excerpt)
  chars = []
  # First character: either a \uXXXX escape that is a valid identifier
  # start (0x5c is '\'), or a plain identifier-start codepoint.
  if code == 0x5c and ucode = unicode_escape? and identifier_start?(ucode)
    chars.push(ucode)
    @pos += 6 # skip the six characters of \uXXXX
  elsif identifier_start?(code)
    chars.push(code)
    @pos += 1
  else
    return nil
  end
  # Remaining characters: identifier parts, again allowing \uXXXX escapes.
  while true
    code = @codes[@pos]
    if code == 0x5c and ucode = unicode_escape? and identifier_part?(ucode)
      chars.push(ucode)
      @pos += 6
    elsif identifier_part?(code)
      chars.push(code)
      @pos += 1
    else
      name = chars.pack("U*").to_sym
      return ECMA262::IdentifierName.get(name)
    end
  end
end
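A minimal, self-contained sketch of the contract described above, for illustration only: MiniLexer and the interning IdentifierName cache here are hypothetical stand-ins, not the real ECMA262/minjs classes, and \uXXXX escape handling is omitted so only ASCII identifiers are recognized.

# Hypothetical stand-in for ECMA262::IdentifierName: .get interns the
# symbol so the same name always yields the same object.
class IdentifierName
  @cache = {}
  class << self
    def get(sym)
      @cache[sym] ||= new(sym)
    end
  end
  attr_reader :name
  def initialize(sym)
    @name = sym
  end
end

class MiniLexer
  def initialize(src)
    @codes = src.codepoints  # source as an array of codepoints
    @pos   = 0               # current position in @codes
  end

  def identifier_start?(c)
    !c.nil? && (c == 0x24 || c == 0x5f ||   # '$' or '_'
                (0x41..0x5a).cover?(c) ||   # 'A'..'Z'
                (0x61..0x7a).cover?(c))     # 'a'..'z'
  end

  def identifier_part?(c)
    identifier_start?(c) || (!c.nil? && (0x30..0x39).cover?(c))  # digits
  end

  # Same contract as identifier_name above: return an IdentifierName and
  # advance @pos past it, or return nil and leave @pos untouched.
  def identifier_name
    return nil unless identifier_start?(@codes[@pos])
    chars = []
    while identifier_part?(@codes[@pos])
      chars.push(@codes[@pos])
      @pos += 1
    end
    IdentifierName.get(chars.pack("U*").to_sym)
  end
end

lex = MiniLexer.new("fooBar = 1")
p lex.identifier_name&.name  # => :fooBar  (position now at " = 1")
p lex.identifier_name        # => nil      (position unchanged, ' ' next)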
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_identifier?(text = nil)\n text = @s.current + @s.peek(2) if text.nil?\n\n case text[0]\n when '-'\n nextChar = text[1]\n !!(nextChar == '-' || nextChar =~ RE_NAME_START || valid_escape?(text[1, 2]))\n\n when RE_NAME_START\n true\n\n when '\\\\'\n valid_escape?(text[0, 2])\n\n else\n false\n end\n end", "def start_identifier?(text = T.unsafe(nil)); end", "def start_identifier?(text = nil)\n text = @s.peek(3) if text.nil?\n\n case text[0]\n when '-'\n !!(text[1] =~ RE_NAME_START || valid_escape?(text[1, 2]))\n\n when RE_NAME_START\n true\n\n when '\\\\'\n valid_escape?(text[0, 2])\n\n else\n false\n end\n end", "def consume_ident\n value = consume_name\n\n if @s.peek == '('\n @s.consume\n\n if value.downcase == 'url'\n consume_url\n else\n create_token(:function, :value => value)\n end\n else\n create_token(:ident, :value => value)\n end\n end", "def nextName\n\t while @currInput.pos >= @currInput.length\n\t\[email protected]()\n\t\t@currInput = @inputStack.last\n\t end\n\n\t @currInput.string[@currInput.pos .. -1] =~ NAME_REGEX\n\t name = $1\n\t if name.nil?\n\t\tstr = \"expected name but saw illegal non-name character\"\n\t\traise ParserError.new(str, self)\n\t end\n\t skipChars(name.length)\n\t return name\n\tend", "def consume_ident\n value = consume_name\n\n if @s.peek == '('\n @s.consume\n\n if value.downcase == 'url'\n @s.consume while @s.peek(2) =~ RE_WHITESPACE_ANCHORED\n\n if @s.peek(2) =~ RE_QUOTED_URL_START\n create_token(:function, :value => value)\n else\n consume_url\n end\n else\n create_token(:function, :value => value)\n end\n else\n create_token(:ident, :value => value)\n end\n end", "def __ident__() @__grammar__.ident end", "def consume_ident; end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def fragment_of_variable_or_method_name(name)\n \t_IDENTIFIER14 = nil\n\n\n\n\n # 269:7: '.' 
IDENTIFIER\n match(:DOT)\n _IDENTIFIER14 = @input.look_ahead(1)\n match(:IDENTIFIER)\n name << \".#{_IDENTIFIER14.text}\"\n\n\n\n end", "def name!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 44 )\n\n type = NAME\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 383:7: ( LETTER | '_' | ':' ) ( NAMECHAR )*\n if @input.peek(1) == 0x3a || @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n # at line 383:29: ( NAMECHAR )*\n while true # decision 6\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0.between?( 0x2d, 0x2e ) || look_6_0.between?( 0x30, 0x3a ) || look_6_0.between?( 0x41, 0x5a ) || look_6_0 == 0x5f || look_6_0.between?( 0x61, 0x7a ) )\n alt_6 = 1\n\n end\n case alt_6\n when 1\n # at line 383:30: NAMECHAR\n namechar!\n\n else\n break # out of loop for decision 6\n end\n end # loop for decision 6\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 44 )\n\n end", "def tokenize_quoted_identifier(&block) # :yields: SQLTree::Token::Identifier\n variable = ''\n until next_char.nil? || current_char == SQLTree.identifier_quote_char # TODO: allow MySQL quoting mode\n variable << (current_char == \"\\\\\" ? next_char : current_char)\n end\n handle_token(SQLTree::Token::Identifier.new(variable), &block)\n end", "def token\n identifier_name || numeric_literal || punctuator || string_literal\n end", "def variable_name\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 83 )\n return_value = VariableNameReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n __ID425__ = nil\n t = nil\n\n tree_for_ID425 = nil\n\n begin\n # at line 821:3: ( ID | t= pseudokeyword )\n alt_107 = 2\n look_107_0 = @input.peek( 1 )\n\n if ( look_107_0 == ID )\n alt_107 = 1\n elsif ( look_107_0 == GET || look_107_0 == SET || look_107_0 == MACRO || look_107_0 == EACH || look_107_0.between?( DEF, OBJECT_DEF ) || look_107_0.between?( T__148, T__150 ) )\n alt_107 = 2\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n raise NoViableAlternative( \"\", 107, 0 )\n end\n case alt_107\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 821:5: ID\n __ID425__ = match( ID, TOKENS_FOLLOWING_ID_IN_variable_name_5838 )\n if @state.backtracking == 0\n\n tree_for_ID425 = @adaptor.create_with_payload( __ID425__ )\n @adaptor.add_child( root_0, tree_for_ID425 )\n\n end\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 822:5: t= pseudokeyword\n @state.following.push( TOKENS_FOLLOWING_pseudokeyword_IN_variable_name_5846 )\n t = pseudokeyword\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, t.tree )\n end\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n t.tree.token.type = ID \n # <-- action\n end\n\n end# - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree 
= @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 83 )\n\n end\n \n return return_value\n end", "def lexer_name_for(*args)\n # Pop off the last arg if it's a hash, which becomes our opts\n if args.last.is_a?(Hash)\n opts = args.pop\n else\n opts = {}\n end\n\n if args.last.is_a?(String)\n code = args.pop\n else\n code = nil\n end\n\n mentos(:lexer_name_for, args, opts, code)\n end", "def extract_node_name(ast)\n case ast.type\n when :symbol_literal\n ast.jump(:ident).source.to_sym\n when :string_literal\n ast.jump(:tstring_content).source\n else\n nil\n end\n end", "def next_identifier\n if @identifier >= @max_identifier\n @identifier = 1\n else\n @identifier += 1\n end\n end", "def parse_interp_ident\n init_scanner!\n interp_ident\n end", "def handle_ident(identifier, lineno_column)\n ContextVariable.new identifier\n end", "def identifier(name)\n SQL::Identifier.new(name)\n end", "def get_name\n la = $lookahead\n\n return expected(\"Name\") unless is_alpha(la)\n\n lookahead\n\n \"_#{la}\"\nend", "def get_name\n la = $lookahead\n\n return expected(\"Name\") unless is_alpha(la)\n\n lookahead\n\n \"_#{la}\"\nend", "def ident_check sym\n valid, scope = @stack.search sym, true\n NamingError.log(\"Line #{sym.line_number}: No identifier '#{sym.text}' in current scope '#{@current_level}'\") if !valid\n \n scope\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? 
statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, 
tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = @adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def identifier_string\n name\n end", "def on_ident(node, compiled_grammar)\n return node.children[0]\n end", "def try_declaration\n # This allows the \"*prop: val\", \":prop: val\", \"#prop: val\", and \".prop:\n # val\" hacks.\n name_start_pos = source_position\n if (s = tok(/[:\\*\\.]|\\#(?!\\{)/))\n name = [s, str {ss}]\n return name unless (ident = interp_ident)\n name << ident\n else\n return unless (name = interp_ident)\n name = Array(name)\n end\n\n if (comment = tok(COMMENT))\n name << comment\n end\n name_end_pos = source_position\n\n mid = [str {ss}]\n return name + mid unless tok(/:/)\n mid << ':'\n\n # If this is a CSS variable, parse it as a property no matter what.\n if name.first.is_a?(String) && 
name.first.start_with?(\"--\")\n return css_variable_declaration(name, name_start_pos, name_end_pos)\n end\n\n return name + mid + [':'] if tok(/:/)\n mid << str {ss}\n post_colon_whitespace = !mid.last.empty?\n could_be_selector = !post_colon_whitespace && (tok?(IDENT_START) || tok?(INTERP_START))\n\n value_start_pos = source_position\n value = nil\n error = catch_error do\n value = value!\n if tok?(/\\{/)\n # Properties that are ambiguous with selectors can't have additional\n # properties nested beneath them.\n tok!(/;/) if could_be_selector\n elsif !tok?(/[;{}]/)\n # We want an exception if there's no valid end-of-property character\n # exists, but we don't want to consume it if it does.\n tok!(/[;{}]/)\n end\n end\n\n if error\n rethrow error unless could_be_selector\n\n # If the value would be followed by a semicolon, it's definitely\n # supposed to be a property, not a selector.\n additional_selector = almost_any_value\n rethrow error if tok?(/;/)\n\n return name + mid + (additional_selector || [])\n end\n\n value_end_pos = source_position\n ss\n require_block = tok?(/\\{/)\n\n node = node(Sass::Tree::PropNode.new(name.flatten.compact, [value], :new),\n name_start_pos, value_end_pos)\n node.name_source_range = range(name_start_pos, name_end_pos)\n node.value_source_range = range(value_start_pos, value_end_pos)\n\n return node unless require_block\n nested_properties! node\n end", "def ident!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 47 )\n\n\n\n type = IDENT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 226:8: ( 'a' .. 'z' | 'A' .. 'Z' | '_' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )*\n if @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n # at line 226:40: ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' )*\n while true # decision 4\n alt_4 = 2\n look_4_0 = @input.peek( 1 )\n\n if ( look_4_0.between?( 0x30, 0x39 ) || look_4_0.between?( 0x41, 0x5a ) || look_4_0 == 0x5f || look_4_0.between?( 0x61, 0x7a ) )\n alt_4 = 1\n\n end\n case alt_4\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 4\n end\n end # loop for decision 4\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 47 )\n\n\n end", "def ace?\n :ace == @identifier\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def visit_VarDeclNode(o)\n unless @preserved_identifiers.include?(o.name)\n o.name = JSObfu::Utils::random_var_encoding(rename_var(o.name))\n end\n\n super\n end", "def identifier_char?(c)\n c =~ /[\\.a-zA-Z0-9_]/ ? 
true : false\n end", "def declaration_key\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 24 )\n return_value = DeclarationKeyReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n __ID90__ = nil\n __STRING91__ = nil\n __NUMBER92__ = nil\n char_literal94 = nil\n reserved93 = nil\n declaration_target95 = nil\n\n tree_for_ID90 = nil\n tree_for_STRING91 = nil\n tree_for_NUMBER92 = nil\n tree_for_char_literal94 = nil\n\n begin\n root_0 = @adaptor.create_flat_list\n\n\n # at line 405:5: ( ID | STRING | NUMBER | reserved ) ':' declaration_target\n # at line 405:5: ( ID | STRING | NUMBER | reserved )\n alt_17 = 4\n case look_17 = @input.peek( 1 )\n when ID then alt_17 = 1\n when STRING then alt_17 = 2\n when NUMBER then alt_17 = 3\n when GET, IF, IN, BREAK, INSTANCEOF, RETURN, CASE, CATCH, SET, CONTINUE, LET, DEFAULT, DELETE, SWITCH, THIS, DO, THROW, TRUE, TRY, TYPEOF, NEW, EACH, UNDEFINED, ELSE, NULL, UNLESS, UNTIL, FALSE, VAR, FINALLY, VOID, FOR, WHILE, WITH, FUNCTION, YIELD, DEF, CLASS_DEF, OBJECT_DEF, T__148, T__149, T__150 then alt_17 = 4\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n raise NoViableAlternative( \"\", 17, 0 )\n end\n case alt_17\n when 1\n # at line 405:7: ID\n __ID90__ = match( ID, TOKENS_FOLLOWING_ID_IN_declaration_key_2680 )\n if @state.backtracking == 0\n\n tree_for_ID90 = @adaptor.create_with_payload( __ID90__ )\n @adaptor.add_child( root_0, tree_for_ID90 )\n\n end\n\n when 2\n # at line 405:12: STRING\n __STRING91__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_declaration_key_2684 )\n if @state.backtracking == 0\n\n tree_for_STRING91 = @adaptor.create_with_payload( __STRING91__ )\n @adaptor.add_child( root_0, tree_for_STRING91 )\n\n end\n\n when 3\n # at line 405:21: NUMBER\n __NUMBER92__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_declaration_key_2688 )\n if @state.backtracking == 0\n\n tree_for_NUMBER92 = @adaptor.create_with_payload( __NUMBER92__ )\n @adaptor.add_child( root_0, tree_for_NUMBER92 )\n\n end\n\n when 4\n # at line 405:30: reserved\n @state.following.push( TOKENS_FOLLOWING_reserved_IN_declaration_key_2692 )\n reserved93 = reserved\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, reserved93.tree )\n end\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n reserved93.tree.token.type = ID \n # <-- action\n end\n\n end\n char_literal94 = match( COLON, TOKENS_FOLLOWING_COLON_IN_declaration_key_2698 )\n if @state.backtracking == 0\n\n tree_for_char_literal94 = @adaptor.create_with_payload( char_literal94 )\n root_0 = @adaptor.become_root( tree_for_char_literal94, root_0 )\n\n end\n @state.following.push( TOKENS_FOLLOWING_declaration_target_IN_declaration_key_2701 )\n declaration_target95 = declaration_target\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, declaration_target95.tree )\n end\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule 
tracing\n # trace_out( __method__, 24 )\n\n end\n \n return return_value\n end", "def ident\n name = get_name\n if @look == '('\n match '('\n match ')'\n emit_ln \"BSR #{name}\"\n else\n emit_ln \"MOVE #{name} (PC),D0\"\n end\nend", "def in_first_of_expression\n case @enum.peek.type\n when :identifier, :number\n true\n when :symbol\n ['(', '-'].include? @enum.peek.value\n else\n false\n end\n end", "def isIdentifier(str)\n digitsOfAlphabet = getAlphabet[0] # if its a digit\n charsOfAlphabet = getAlphabet[1] # if a letter\n\n # first character in name cannot be digit\n if digitsOfAlphabet.include?(str[0])\n return false\n end\n\n # if keyword includes str\n if getKeywords.include?(str)\n return false\n end\n\n # check is in the alphabet\n for i in 1..str.size-1\n character = str[i]\n\n unless digitsOfAlphabet.include?(character) or charsOfAlphabet.include?(character)\n return false\n end\n end\n\n return true\nend", "def property_name\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 82 )\n return_value = PropertyNameReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n __ID421__ = nil\n __STRING422__ = nil\n __NUMBER423__ = nil\n reserved424 = nil\n\n tree_for_ID421 = nil\n tree_for_STRING422 = nil\n tree_for_NUMBER423 = nil\n\n begin\n # at line 814:3: ( ID | STRING | NUMBER | reserved )\n alt_106 = 4\n case look_106 = @input.peek( 1 )\n when ID then alt_106 = 1\n when STRING then alt_106 = 2\n when NUMBER then alt_106 = 3\n when GET, IF, IN, BREAK, INSTANCEOF, RETURN, CASE, CATCH, SET, CONTINUE, LET, DEFAULT, DELETE, SWITCH, THIS, DO, THROW, TRUE, TRY, TYPEOF, NEW, EACH, UNDEFINED, ELSE, NULL, UNLESS, UNTIL, FALSE, VAR, FINALLY, VOID, FOR, WHILE, WITH, FUNCTION, YIELD, DEF, CLASS_DEF, OBJECT_DEF, T__148, T__149, T__150 then alt_106 = 4\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n raise NoViableAlternative( \"\", 106, 0 )\n end\n case alt_106\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 814:5: ID\n __ID421__ = match( ID, TOKENS_FOLLOWING_ID_IN_property_name_5805 )\n if @state.backtracking == 0\n\n tree_for_ID421 = @adaptor.create_with_payload( __ID421__ )\n @adaptor.add_child( root_0, tree_for_ID421 )\n\n end\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 815:5: STRING\n __STRING422__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_property_name_5811 )\n if @state.backtracking == 0\n\n tree_for_STRING422 = @adaptor.create_with_payload( __STRING422__ )\n @adaptor.add_child( root_0, tree_for_STRING422 )\n\n end\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 816:5: NUMBER\n __NUMBER423__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_property_name_5817 )\n if @state.backtracking == 0\n\n tree_for_NUMBER423 = @adaptor.create_with_payload( __NUMBER423__ )\n @adaptor.add_child( root_0, tree_for_NUMBER423 )\n\n end\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 817:5: reserved\n @state.following.push( TOKENS_FOLLOWING_reserved_IN_property_name_5823 )\n reserved424 = reserved\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, reserved424.tree )\n end\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n reserved424.tree.token.type = ID \n # <-- action\n end\n\n end# - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = 
@adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 82 )\n\n end\n \n return return_value\n end", "def nil_literal?(name)\n node.public_send(name).kind_of?(Rubinius::AST::NilLiteral)\n end", "def tokenize_keyword(&block) # :yields: SQLTree::Token\n literal = current_char\n literal << next_char while /[\\w]/ =~ peek_char\n\n if SQLTree::Token::KEYWORDS.include?(literal.upcase)\n handle_token(SQLTree::Token.const_get(literal.upcase).new(literal), &block)\n else\n handle_token(SQLTree::Token::Identifier.new(literal), &block)\n end\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def vardeclaration\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 6 )\n __IDENTIFIER2__ = nil\n __IDENTIFIER3__ = nil\n __INTEGER4__ = nil\n t = nil\n\n begin\n # at line 438:2: (t= type IDENTIFIER ';' | t= primitivetype '[' INTEGER ']' IDENTIFIER ';' )\n alt_11 = 2\n look_11_0 = @input.peek( 1 )\n\n if ( look_11_0 == T__26 || look_11_0.between?( T__35, T__38 ) )\n look_11_1 = @input.peek( 2 )\n\n if ( look_11_1 == IDENTIFIER )\n alt_11 = 1\n elsif ( look_11_1 == T__32 )\n alt_11 = 2\n else\n raise NoViableAlternative( \"\", 11, 1 )\n end\n elsif ( look_11_0 == IDENTIFIER )\n alt_11 = 1\n else\n raise NoViableAlternative( \"\", 11, 0 )\n end\n case alt_11\n when 1\n # at line 439:4: t= type IDENTIFIER ';'\n @state.following.push( TOKENS_FOLLOWING_type_IN_vardeclaration_309 )\n t = type\n @state.following.pop\n __IDENTIFIER2__ = match( IDENTIFIER, TOKENS_FOLLOWING_IDENTIFIER_IN_vardeclaration_311 )\n # --> action\n\n \t validate_existing_class(t)\n \t if(not @current_method.nil?)\n \t @current_method.set_to_local_variables(__IDENTIFIER2__.text,VariableSymbol.new(__IDENTIFIER2__.text, t))\n \t else\n \t @current_class.set_to_instance_variables(__IDENTIFIER2__.text, VariableSymbol.new(__IDENTIFIER2__.text, t))\n \t end\n \t \n # <-- action\n match( T__31, TOKENS_FOLLOWING_T__31_IN_vardeclaration_321 )\n\n when 2\n # at line 450:4: t= primitivetype '[' INTEGER ']' IDENTIFIER ';'\n @state.following.push( TOKENS_FOLLOWING_primitivetype_IN_vardeclaration_333 )\n t = primitivetype\n @state.following.pop\n match( T__32, TOKENS_FOLLOWING_T__32_IN_vardeclaration_335 )\n 
__INTEGER4__ = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_vardeclaration_337 )\n match( T__33, TOKENS_FOLLOWING_T__33_IN_vardeclaration_339 )\n __IDENTIFIER3__ = match( IDENTIFIER, TOKENS_FOLLOWING_IDENTIFIER_IN_vardeclaration_341 )\n # --> action\n\n variable = VariableSymbol.new(__IDENTIFIER3__.text, t, __INTEGER4__.text.to_i) \n if(not @current_method.nil?)\n @current_method.set_to_local_variables(__IDENTIFIER3__.text, variable)\n else\n @current_class.set_to_instance_variables(__IDENTIFIER3__.text, variable)\n end\n generate('ary', variable.type, variable.dim , variable.address )\n \n # <-- action\n match( T__31, TOKENS_FOLLOWING_T__31_IN_vardeclaration_354 )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 6 )\n\n end\n \n return \n end", "def parse_identifier container, single, tk, comment # :nodoc:\n case tk[:text]\n when 'private', 'protected', 'public', 'private_class_method',\n 'public_class_method', 'module_function' then\n parse_visibility container, single, tk\n return true\n when 'private_constant', 'public_constant'\n parse_constant_visibility container, single, tk\n return true\n when 'attr' then\n parse_attr container, single, tk, comment\n when /^attr_(reader|writer|accessor)$/ then\n parse_attr_accessor container, single, tk, comment\n when 'alias_method' then\n parse_alias container, single, tk, comment\n when 'require', 'include' then\n # ignore\n else\n if comment.text =~ /\\A#\\#$/ then\n case comment.text\n when /^# +:?attr(_reader|_writer|_accessor)?:/ then\n parse_meta_attr container, single, tk, comment\n else\n method = parse_meta_method container, single, tk, comment\n method.params = container.params if\n container.params\n method.block_params = container.block_params if\n container.block_params\n end\n end\n end\n\n false\n end", "def parse_primary_expression\n case lookahead.name\n when :BOOLEAN, :NIL, :NUMBER, :REGEXP\n then node(:literal, value: lex.value)\n when :STRING then node(:literal, value: \"'%s'\" % lex.value)\n when :identifier then node(:identifier, name: lex.value)\n when :new then parse_new_expression\n when :'(' then parse_group_expression\n when :'[' then parse_array\n when :'{' then parse_object\n when :'->' then parse_lambda_expression\n else unexpected_error(lex)\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def member_special_identifier(member)\n \t_IDENTIFIER18 = nil\n \t_EXTENDS19 = nil\n\n\n\n\n # 378:5: ( IDENTIFIER | ',' | '&' | '<' | '>' | EXTENDS | '?' )\n alt50 = 7\n # 377:1: member_special_identifier[member] : ( IDENTIFIER | ',' | '&' | '<' | '>' | EXTENDS | '?' );\n case look_ahead(1)\n when :IDENTIFIER\n alt50 = 1\n when :COMMA\n alt50 = 2\n when :ECOMMERCIAL\n alt50 = 3\n when :LEFT_ANGULAR_BRACKET\n alt50 = 4\n when :RIGHT_ANGULAR_BRACKET\n alt50 = 5\n when :EXTENDS\n alt50 = 6\n when :QUESTION_MARK\n alt50 = 7\n else\n raise \"Expected: 377:1: member_special_identifier[member] : ( IDENTIFIER | ',' | '&' | '<' | '>' | EXTENDS | '?' 
);\"\n\n end\n case alt50\n when 1\n # 378:7: IDENTIFIER\n _IDENTIFIER18 = @input.look_ahead(1)\n match(:IDENTIFIER)\n member.type += _IDENTIFIER18.text\n when 2\n # 379:7: ','\n match(:COMMA)\n member.type += \", \"\n when 3\n # 380:7: '&'\n match(:ECOMMERCIAL)\n member.type += \" & \"\n when 4\n # 381:7: '<'\n match(:LEFT_ANGULAR_BRACKET)\n member.type += '<'\n when 5\n # 382:7: '>'\n match(:RIGHT_ANGULAR_BRACKET)\n member.type += '>'\n when 6\n # 383:7: EXTENDS\n _EXTENDS19 = @input.look_ahead(1)\n match(:EXTENDS)\n member.type += \" #{_EXTENDS19.text} \"\n when 7\n # 384:7: '?'\n match(:QUESTION_MARK)\n member.type += '?' \n end\n\n\n\n end", "def expanded_identifier\n case type\n when :@ident, :@const, :@gvar, :@cvar, :@ivar, :@kw, :@op\n self[1]\n when :var_ref, :var_field, :const_ref, :symbol\n self[1].expanded_identifier\n when :top_const_ref, :top_const_field\n \"::#{self[1].expanded_identifier}\"\n when :const_path_ref, :const_path_field\n lhs, rhs = children\n \"#{lhs.expanded_identifier}::#{rhs.expanded_identifier}\"\n end\n end", "def match?(name, literal) true end", "def get_name\n token = \"\"\n\n return expected(\"Name\") unless is_alpha($lookahead)\n\n while is_alnum($lookahead)\n token << $lookahead\n lookahead\n end\n\n skip_white\n\n \"_#{token}\"\nend", "def on_ident(name, ((line, column)))\n add_variable(name, line, column)\n end", "def class_special_identifier(class_def)\n \t_IDENTIFIER2 = nil\n \t_EXTENDS3 = nil\n\n\n\n\n # 38:5: ( IDENTIFIER | ',' | '&' | '<' | '>' | EXTENDS )\n alt6 = 6\n # 37:1: class_special_identifier[class_def] : ( IDENTIFIER | ',' | '&' | '<' | '>' | EXTENDS );\n case look_ahead(1)\n when :IDENTIFIER\n alt6 = 1\n when :COMMA\n alt6 = 2\n when :ECOMMERCIAL\n alt6 = 3\n when :LEFT_ANGULAR_BRACKET\n alt6 = 4\n when :RIGHT_ANGULAR_BRACKET\n alt6 = 5\n when :EXTENDS\n alt6 = 6\n else\n raise \"Expected: 37:1: class_special_identifier[class_def] : ( IDENTIFIER | ',' | '&' | '<' | '>' | EXTENDS );\"\n\n end\n case alt6\n when 1\n # 38:7: IDENTIFIER\n _IDENTIFIER2 = @input.look_ahead(1)\n match(:IDENTIFIER)\n class_def.name += _IDENTIFIER2.text\n when 2\n # 39:7: ','\n match(:COMMA)\n class_def.name += \", \"\n when 3\n # 40:7: '&'\n match(:ECOMMERCIAL)\n class_def.name += \" & \"\n when 4\n # 41:7: '<'\n match(:LEFT_ANGULAR_BRACKET)\n class_def.name += '<'\n when 5\n # 42:7: '>'\n match(:RIGHT_ANGULAR_BRACKET)\n class_def.name += '>'\n when 6\n # 43:7: EXTENDS\n _EXTENDS3 = @input.look_ahead(1)\n match(:EXTENDS)\n class_def.name += \" #{_EXTENDS3.text} \"\n end\n\n\n\n end", "def aname!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 40 )\n\n type = ANAME\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 370:8: 'name'\n match( \"name\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 40 )\n\n end", "def identifier\n id || name || default_identifier\n end", "def eName2Exp(name)\n # puts \"eName2Exp with name=#{name}\"\n ref = $refs.find do |ref|\n if ref.ref.respond_to?(:name) then\n ref.ref.name == name.to_sym\n else\n ref.name == name.to_sym\n end\n end\n # puts \"ref=#{ref}\"\n unless ref\n return Value.new($bit8,name.to_i)\n end\n return ref\nend", "def type_name\n if @scanner.peek.value == 'int'\n\t t = 'int'\n elsif @scanner.peek.value == 'void'\n\t t = 'void'\n\n end\n match(Token.new(:reserved, t))\n t\n end", "def map_primitive(name, ident, &block)\n 
lang_eval do\n @prim_ident[name.to_sym] = ident.to_s\n @prim_to_lit[name.to_sym] = block\n end\n nil\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def test_name_grabbing\n\ttext = 'name name2 NameSpace:Name-foo_bar remaining>'\n\ttoker = NQXML::Tokenizer.new(text)\n\tassert_equal('name', toker.nextName())\n\ttoker.skipSpaces()\n\tassert_equal('name2', toker.nextName())\n\ttoker.skipSpaces()\n\tassert_equal('NameSpace:Name-foo_bar', toker.nextName())\n\tassert_equal(' remaining', toker.textUpTo('>', false, false))\n\tassert_equal('>', toker.nextChar())\n\tassert(toker.eof?())\n end", "def identifier\n return @identifier unless @identifier.is_a?(BNode)\n if @identifier.equal?(self)\n # Generate from the sequence a..zzz, unless already taken\n @@next_generated = @@next_generated.succ while @@named_nodes.has_key?(@@next_generated)\n @identifier, @@next_generated = @@next_generated, @@next_generated.succ\n else\n # Previously allocated node\n @identifier = @identifier.identifier\n end\n @identifier\n end", "def == o\n identifier == o.identifier\n end", "def create_name( name, source_token = nil )\n if name.is_a?(Token) then\n source_token = name if source_token.nil?\n name = name.text\n end\n \n name = Name.new(name, @name, source_token)\n yield( name ) if block_given?\n return name\n end", "def parse_variable_declaration\n type = parse_type_keyword\n name = parse_variable\n\n value = nil\n\n if peek?(:OP_ASSIGNMENT)\n expect(:OP_ASSIGNMENT)\n value = parse_expression\n end\n\n StmtVarDecl.new(type, name, value)\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def variable_name_expr; end", "def identificador!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 79 
)\n\n\n\n type = Identificador\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 616:6: Letter ( Letter | Digito )*\n letter!\n\n # at line 616:13: ( Letter | Digito )*\n while true # decision 27\n alt_27 = 2\n look_27_0 = @input.peek( 1 )\n\n if ( look_27_0 == 0x24 || look_27_0.between?( 0x30, 0x39 ) || look_27_0.between?( 0x41, 0x5a ) || look_27_0 == 0x5f || look_27_0.between?( 0x61, 0x7a ) || look_27_0.between?( 0xc0, 0xd6 ) || look_27_0.between?( 0xd8, 0xf6 ) || look_27_0.between?( 0xf8, 0x1fff ) || look_27_0.between?( 0x3040, 0x318f ) || look_27_0.between?( 0x3300, 0x337f ) || look_27_0.between?( 0x3400, 0x3d2d ) || look_27_0.between?( 0x4e00, 0x9fff ) || look_27_0.between?( 0xf900, 0xfaff ) )\n alt_27 = 1\n\n end\n case alt_27\n when 1\n # at line \n if @input.peek(1) == 0x24 || @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a ) || @input.peek( 1 ).between?( 0xc0, 0xd6 ) || @input.peek( 1 ).between?( 0xd8, 0xf6 ) || @input.peek( 1 ).between?( 0xf8, 0x1fff ) || @input.peek( 1 ).between?( 0x3040, 0x318f ) || @input.peek( 1 ).between?( 0x3300, 0x337f ) || @input.peek( 1 ).between?( 0x3400, 0x3d2d ) || @input.peek( 1 ).between?( 0x4e00, 0x9fff ) || @input.peek( 1 ).between?( 0xf900, 0xfaff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 27\n end\n end # loop for decision 27\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 79 )\n\n\n end", "def literal?(node); end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? || token.kind_of?(String) \n @pointer += 1\n token\n end", "def ident\n ensure_valid\n @ident\n end", "def read_declarator(rname, basety, params, ctx)\n if next_token?('(')\n # '(' is either beginning of grouping parentheses or of a function parameter list.\n # If the next token is a type name, a parameter list must follow.\n if is_type?(peek)\n return read_declarator_func(basety, params)\n end\n\n # If not, it's grouping. In that case we have to read from outside.\n # For example, consider int (*)(), which is \"pointer to function returning int\".\n # We have only read \"int\" so far. 
We don't want to pass \"int\" to\n # a recursive call, or otherwise we would get \"pointer to int\".\n # Here, we pass a dummy object to get \"pointer to <something>\" first,\n # continue reading to get \"function returning int\", and then combine them.\n stub = Type.make_stub_type\n t = read_declarator(rname, stub, params, ctx)\n expect!(')')\n stub.replace_by!(read_declarator_tail(basety, params))\n return t\n end\n\n if next_token?('*')\n skip_type_qualifiers!\n return read_declarator(rname, Type.make_ptr_type(basety), params, ctx)\n end\n\n tok = get\n if tok.kind == T::IDENT\n if ctx == DECL::CAST\n Util.errort!(tok, \"identifier is not expected, but got #{tok}\")\n end\n rname << tok.sval # Write as return value\n return read_declarator_tail(basety, params)\n end\n\n if [DECL::BODY, DECL::PARAM].include?(ctx)\n Util.errort!(tok, \"identifier, ( or * are expected, but got #{tok}\")\n end\n @lexer.unget_token(tok)\n read_declarator_tail(basety, params)\n end", "def determine_token_slot(name)\n slot = name.split.shift.downcase\n\n case slot\n when 'chest'\n 'Chest'\n when 'gauntlets'\n 'Hands'\n when 'crown'\n 'Head'\n when 'helm'\n 'Head'\n when 'leggings'\n 'Legs'\n when 'mantle'\n 'Shoulder'\n when 'shoulders'\n 'Shoulder'\n end\n end", "def statement\n case @enum.peek.value\n when 'break'\n break_statement\n when 'continue'\n continue_statement\n when 'if'\n if_statement\n when 'printf'\n printf_func_call\n when 'return'\n return_statement\n when 'scanf'\n scanf_func_call\n when 'while'\n while_statement\n else\n if @local.include?(@enum.peek.value)\n @instruction.push('local[' + @local.index(@enum.peek.value).to_s + ']')\n elsif @global.include?(@enum.peek.value)\n @instruction.push('global[' + @global.index(@enum.peek.value).to_s + ']')\n else\n @instruction.push(@enum.peek.value)\n end\n match(:identifier)\n statement_tail\n end\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def get_token\n\t\tt = Token.new\n\t\tcase @src[@lineno][@linepos]\n\t\t\twhen ' ' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\f' then #less likely to see this\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\t' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\v' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '0'..'9' then\n\t\t\t\tt = parse_number\n\t\t\twhen 'A-Z' then\n\t\t\t\tt = parse_name\n\t\t\twhen 'a-z' then\n\t\t\t\tparse_name\n\t\t\twhen '_' then\n\t\t\t\tt = parse_name\n\t\t\twhen /[~!$%\\^&*()-+=|{}\\[\\]\\:;\\/?<>,.]/ then #very much check\n\t\t\t\tt = parse_operator\n\t\t\twhen '\"' then\n\t\t\t\tt = parse_string\n\t\tend\n\tend", "def variable_declaration\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n return_value = VariableDeclarationReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n char_literal40 = nil\n __ID43__ = nil\n declaration_target41 = nil\n expression42 = nil\n\n tree_for_char_literal40 = nil\n tree_for_ID43 = nil\n\n begin\n # at line 75:3: ( ^( '=' declaration_target expression ) | ID )\n alt_9 = 2\n look_9_0 = @input.peek( 1 )\n\n if ( look_9_0 == ASGN )\n alt_9 = 1\n elsif ( look_9_0 == ID )\n alt_9 = 2\n else\n raise NoViableAlternative( \"\", 9, 0 )\n end\n case alt_9\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 75:5: ^( '=' declaration_target expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = 
@input.look\n char_literal40 = match( ASGN, TOKENS_FOLLOWING_ASGN_IN_variable_declaration_347 )\n\n tree_for_char_literal40 = @adaptor.copy_node( char_literal40 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal40, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_declaration_target_IN_variable_declaration_349 )\n declaration_target41 = declaration_target\n @state.following.pop\n\n @adaptor.add_child( root_1, declaration_target41.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_variable_declaration_351 )\n expression42 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression42.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 76:5: ID\n _last = @input.look\n __ID43__ = match( ID, TOKENS_FOLLOWING_ID_IN_variable_declaration_359 )\n\n tree_for_ID43 = @adaptor.copy_node( __ID43__ )\n\n @adaptor.add_child( root_0, tree_for_ID43 )\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end\n \n return return_value\n end", "def namechar!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 45 )\n\n \n # - - - - main rule block - - - -\n # at line \n if @input.peek( 1 ).between?( 0x2d, 0x2e ) || @input.peek( 1 ).between?( 0x30, 0x3a ) || @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 45 )\n\n end", "def id?(str)\n token = @tokens[@p]\n return false unless token && token[0] == :id\n return false unless token[1] == str\n @p += 1\n token[1]\n end", "def ==(identifier)\n @expression == identifier\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n 
advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def identifier; end", "def identifier; end", "def id!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 102 )\n\n type = ID\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 923:5: ( '$' | '_' | 'a' .. 'z' | 'A' .. 'Z' ) ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '$' )*\n if @input.peek(1) == 0x24 || @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n # at line 924:5: ( 'a' .. 'z' | 'A' .. 'Z' | '0' .. '9' | '_' | '$' )*\n while true # decision 31\n alt_31 = 2\n look_31_0 = @input.peek( 1 )\n\n if ( look_31_0 == 0x24 || look_31_0.between?( 0x30, 0x39 ) || look_31_0.between?( 0x41, 0x5a ) || look_31_0 == 0x5f || look_31_0.between?( 0x61, 0x7a ) )\n alt_31 = 1\n\n end\n case alt_31\n when 1\n # at line \n if @input.peek(1) == 0x24 || @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x5a ) || @input.peek(1) == 0x5f || @input.peek( 1 ).between?( 0x61, 0x7a )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 31\n end\n end # loop for decision 31\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 102 )\n\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def parse_literal(expr)\n val = expr[1][1][1]\n val = val.to_sym if expr[0] == :symbol_literal ||\n expr[0] == :assoc_new\n val\n end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def octal_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37\n @pos += 1\n pos0 = @pos\n while code = @codes[@pos] and code >= 0x30 and code <= 0x37\n @pos += 1\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(8))\n end\n else\n nil\n end\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = 
LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def spot_op_cdecl\n nd_lhs, op, _nd_rhs = @node.children\n *nd_parent_lhs, _const = nd_lhs.children\n if @name == op\n @snippet = @fetch[nd_lhs.last_lineno]\n if @snippet.match(/\\G\\s*(#{ Regexp.quote(op) })=/, nd_lhs.last_column)\n @beg_column = $~.begin(1)\n @end_column = $~.end(1)\n end\n else\n # constant access error\n @end_column = nd_lhs.last_column\n if nd_parent_lhs.empty? # example: ::C += 1\n if nd_lhs.first_lineno == nd_lhs.last_lineno\n @snippet = @fetch[nd_lhs.last_lineno]\n @beg_column = nd_lhs.first_column\n end\n else # example: Foo::Bar::C += 1\n if nd_parent_lhs.last.last_lineno == nd_lhs.last_lineno\n @snippet = @fetch[nd_lhs.last_lineno]\n @beg_column = nd_parent_lhs.last.last_column\n end\n end\n end\n end", "def nextPublicIdLiteral(tagName)\n\t quote = peekChar()\n\t if quote != '\"' && quote != \"'\"\n\t\tstr = \"quoted literal not quoted in #{tagName} PUBLIC id\"\n\t\traise ParserError.new(str, self)\n\t end\n\t skipChar()\t\t# eat quote\n\n\t text = textUpTo(quote, false, true)\n\t if !(text =~ PUBLIC_ID_LITERAL_REGEX)\n\t\tstr = \"#{tagName} PUBLIC public id literal contains illegal\" +\n\t\t ' character(s)'\n\t\t raise ParserError.new(str, self)\n\t end\n\t skipChar()\t\t# eat quote\n\n# FIX - we are returning literal text, but should do something\n# intelligent.\n\n\t return quote + text + quote\n\tend", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def is_named? n\n n && n.downcase == @name\n end", "def valid_name!(name)\n not_empty!(name)\n unless [String, Symbol].include?(name.class)\n coercion_error!\n end\n name\n end", "def lex_en_expr_variable; end", "def lex_en_expr_variable; end", "def lex_en_expr_variable; end" ]
[ "0.656172", "0.6277313", "0.6253718", "0.5884431", "0.5764008", "0.5759629", "0.56021506", "0.55904865", "0.54731596", "0.5452019", "0.54476976", "0.54029274", "0.53734994", "0.53411716", "0.5317944", "0.5303333", "0.52950895", "0.5293675", "0.52487403", "0.52416605", "0.5236312", "0.5236312", "0.52340466", "0.5233169", "0.52330124", "0.5219419", "0.5217308", "0.51865715", "0.51742935", "0.51567435", "0.5131395", "0.51211643", "0.51117647", "0.51104766", "0.5076188", "0.505905", "0.50554425", "0.49474916", "0.49381152", "0.49359363", "0.49332905", "0.49247158", "0.49106106", "0.49073866", "0.49073866", "0.49073866", "0.4897418", "0.48957348", "0.48938307", "0.48918062", "0.4888281", "0.48821574", "0.48800188", "0.48605052", "0.4837771", "0.48224276", "0.48159108", "0.48041713", "0.48034537", "0.47963324", "0.47898623", "0.47856024", "0.47679302", "0.4762841", "0.4759423", "0.4752718", "0.47517458", "0.47492024", "0.47453794", "0.4736454", "0.4733422", "0.47147605", "0.47004914", "0.46794567", "0.46794567", "0.46794567", "0.46763092", "0.4664645", "0.46628892", "0.46627098", "0.465694", "0.46443477", "0.4642404", "0.4642404", "0.46403834", "0.46390435", "0.46341816", "0.46341616", "0.46341616", "0.46341616", "0.46302262", "0.46189064", "0.4614009", "0.46133667", "0.46099606", "0.4609443", "0.4608767", "0.4608386", "0.4608386", "0.4608386" ]
0.69444585
0
Tests whether the next literal is a Punctuator. If it is, returns an ECMA262::Punctuator object and advances the lexical parser's position; otherwise returns nil and leaves the position unchanged.
def punctuator code0 = @codes[@pos] code1 = @codes[@pos+1] code2 = @codes[@pos+2] code3 = @codes[@pos+3] if false elsif code0 == 0x28 # ( @pos += 1 # ( return ECMA262::PUNC_LPARENTHESIS elsif code0 == 0x29 # ) @pos += 1 # ) return ECMA262::PUNC_RPARENTHESIS elsif code0 == 0x7b # { @pos += 1 # { return ECMA262::PUNC_LCURLYBRAC elsif code0 == 0x7d # } @pos += 1 # } return ECMA262::PUNC_RCURLYBRAC elsif code0 == 0x3b # ; @pos += 1 # ; return ECMA262::PUNC_SEMICOLON elsif code0 == 0x3d # = if code1 == 0x3d and code2 == 0x3d # === @pos += 3 return ECMA262::PUNC_SEQ end if code1 == 0x3d # == @pos += 2 return ECMA262::PUNC_EQ end @pos += 1 # = return ECMA262::PUNC_ASSIGN elsif code0 == 0x21 # ! if code1 == 0x3d and code2 == 0x3d # !== @pos += 3 return ECMA262::PUNC_SNEQ end if code1 == 0x3d # != @pos += 2 return ECMA262::PUNC_NEQ end @pos += 1 # ! return ECMA262::PUNC_LNOT elsif code0 == 0x25 # % if code1 == 0x3d # %= @pos += 2 return ECMA262::PUNC_MODASSIGN end @pos += 1 # % return ECMA262::PUNC_MOD elsif code0 == 0x26 # & if code1 == 0x3d # &= @pos += 2 return ECMA262::PUNC_ANDASSIGN end if code1 == 0x26 # && @pos += 2 return ECMA262::PUNC_LAND end @pos += 1 # & return ECMA262::PUNC_AND elsif code0 == 0x2a # * if code1 == 0x3d # *= @pos += 2 return ECMA262::PUNC_MULASSIGN end @pos += 1 # * return ECMA262::PUNC_MUL elsif code0 == 0x2b # + if code1 == 0x3d # += @pos += 2 return ECMA262::PUNC_ADDASSIGN end if code1 == 0x2b # ++ @pos += 2 return ECMA262::PUNC_INC end @pos += 1 # + return ECMA262::PUNC_ADD elsif code0 == 0x2c # , @pos += 1 # , return ECMA262::PUNC_COMMA elsif code0 == 0x2d # - if code1 == 0x3d # -= @pos += 2 return ECMA262::PUNC_SUBASSIGN end if code1 == 0x2d # -- @pos += 2 return ECMA262::PUNC_DEC end @pos += 1 # - return ECMA262::PUNC_SUB elsif code0 == 0x2e # . @pos += 1 # . return ECMA262::PUNC_PERIOD elsif code0 == 0x3a # : @pos += 1 # : return ECMA262::PUNC_COLON elsif code0 == 0x3c # < if code1 == 0x3d # <= @pos += 2 return ECMA262::PUNC_LTEQ end if code1 == 0x3c and code2 == 0x3d # <<= @pos += 3 return ECMA262::PUNC_LSHIFTASSIGN end if code1 == 0x3c # << @pos += 2 return ECMA262::PUNC_LSHIFT end @pos += 1 # < return ECMA262::PUNC_LT elsif code0 == 0x3e # > if code1 == 0x3e and code2 == 0x3e and code3 == 0x3d # >>>= @pos += 4 return ECMA262::PUNC_URSHIFTASSIGN end if code1 == 0x3e and code2 == 0x3e # >>> @pos += 3 return ECMA262::PUNC_URSHIFT end if code1 == 0x3e and code2 == 0x3d # >>= @pos += 3 return ECMA262::PUNC_RSHIFTASSIGN end if code1 == 0x3e # >> @pos += 2 return ECMA262::PUNC_RSHIFT end if code1 == 0x3d # >= @pos += 2 return ECMA262::PUNC_GTEQ end @pos += 1 # > return ECMA262::PUNC_GT elsif code0 == 0x3f # ? @pos += 1 # ? return ECMA262::PUNC_CONDIF elsif code0 == 0x5b # [ @pos += 1 # [ return ECMA262::PUNC_LSQBRAC elsif code0 == 0x5d # ] @pos += 1 # ] return ECMA262::PUNC_RSQBRAC elsif code0 == 0x5e # ^ if code1 == 0x3d # ^= @pos += 2 return ECMA262::PUNC_XORASSIGN end @pos += 1 # ^ return ECMA262::PUNC_XOR elsif code0 == 0x7c # | if code1 == 0x7c # || @pos += 2 return ECMA262::PUNC_LOR end if code1 == 0x3d # |= @pos += 2 return ECMA262::PUNC_ORASSIGN end @pos += 1 # | return ECMA262::PUNC_OR elsif code0 == 0x7e # ~ @pos += 1 # ~ return ECMA262::PUNC_NOT end nil end
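A minimal sketch of the scanning pattern the method above hand-unrolls: try the longest punctuator first, advance the position only on a match, and return nil (leaving the position untouched) when nothing matches. The TinyLexer class, its PUNC table, and the token symbols below are illustrative assumptions for this sketch, not part of minjs or of this dataset record.

class TinyLexer
  # Punctuators ordered longest-first within each shared prefix,
  # so "===" wins over "==" and "=".
  PUNC = [
    ["===", :SEQ], ["==", :EQ], ["=", :ASSIGN],
    ["++", :INC],  ["+=", :ADDASSIGN], ["+", :ADD],
    [";", :SEMICOLON]
  ]

  def initialize(src)
    @codes = src.codepoints  # scan over codepoints, as the method above does
    @pos = 0
  end

  # Same contract as the document's method: return a token and advance
  # @pos on a match, or return nil with @pos unchanged.
  def punctuator
    PUNC.each do |text, tok|
      if @codes[@pos, text.size] == text.codepoints
        @pos += text.size
        return tok
      end
    end
    nil
  end
end

lex = TinyLexer.new("===;")
p lex.punctuator  # => :SEQ       (longest match wins over == and =)
p lex.punctuator  # => :SEMICOLON
p lex.punctuator  # => nil        (end of input; @pos is unchanged)

The table keeps the sketch short; the hand-unrolled if/elsif chain in the document above trades that brevity for dispatch on the leading codepoint without any per-call string or array allocation.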
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token\n identifier_name || numeric_literal || punctuator || string_literal\n end", "def div_punctuator\n if @codes[@pos] == 0x2f\n if @codes[@pos+1] == 0x3d\n @pos += 2\n return ECMA262::PUNC_DIVASSIGN\n else\n @pos += 1\n return ECMA262::PUNC_DIV\n end\n end\n nil\n end", "def char_tokenize(p_token, lineno, pos)\n\n\t# could be a KEYWORD, TYPE, or ID here\n\tcase p_token\n\twhen /\\b(while)\\b/\n\t\treturn Token.new(\"T_WHILE\", p_token, lineno, pos)\n\twhen /\\b(if)\\b/\n\t\treturn Token.new(\"T_IF\", p_token, lineno, pos)\n\twhen /\\b(false)\\b/\n\t\treturn Token.new(\"T_BOOLEAN\", p_token, lineno, pos)\n\twhen /\\b(true)\\b/\n\t\treturn Token.new(\"T_BOOLEAN\", p_token, lineno, pos)\n\twhen /\\b(print)\\b/\n\t\treturn Token.new(\"T_PRINT\", p_token, lineno, pos)\n\twhen /\\b(int)\\b/\n\t\treturn Token.new(\"T_TYPE\", p_token, lineno, pos)\n\twhen /\\b(string)\\b/\n\t\treturn Token.new(\"T_TYPE\", p_token, lineno, pos)\n\twhen /\\b(boolean)\\b/\n\t\treturn Token.new(\"T_TYPE\", p_token, lineno, pos)\n\twhen /\\b[a-z]\\b/\n\t\treturn Token.new(\"T_ID\", p_token, lineno, pos)\n\telse\n\t\traise UnknownSymbolError.new(p_token, lineno, pos)\n\tend\n\t\nend", "def isPunctChar(ch)\n return UNICODE_PUNCT_RE =~ ch\n end", "def speak_as_literal_punctuation(element)\n speak_as(\n element,\n get_regular_expression_of_symbols,\n 'literal-punctuation',\n method(:operation_speak_as_literal_punctuation)\n )\n end", "def punctuation?\n PUNCTUATION.include? @kind\n end", "def tokenize (p_token, type, lineno, pos)\n\t\n\tif type == \"op\"\n\t\treturn op_tokenize(p_token, lineno, pos)\n\t\n\telsif type == \"character\"\n\t\treturn char_tokenize(p_token, lineno, pos)\n\t\n\telsif type == \"string\"\n\t\treturn string_tokenize(p_token, lineno, pos)\n\t\n\telsif type == \"digit\"\n\t\treturn digit_tokenize(p_token, lineno, pos)\n\t\n\telse\n\t\t# should create an error here, just for thoroughness\n\tend\nend", "def puncture\n end", "def token!\r\n # at line 1:8: ( T__6 | NUMBER | SPACE )\r\n alt_3 = 3\r\n case look_3 = @input.peek( 1 )\r\n when 0x2b then alt_3 = 1\r\n when 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 then alt_3 = 2\r\n when 0x20 then alt_3 = 3\r\n else\r\n raise NoViableAlternative( \"\", 3, 0 )\r\n end\r\n case alt_3\r\n when 1\r\n # at line 1:10: T__6\r\n t__6!\r\n\r\n when 2\r\n # at line 1:15: NUMBER\r\n number!\r\n\r\n when 3\r\n # at line 1:22: SPACE\r\n space!\r\n\r\n end\r\n end", "def operation_speak_as_literal_punctuation(content, index, children)\n data_property_value = 'literal-punctuation'\n unless index.zero?\n children.push(\n create_content_element(content[0..(index - 1)], data_property_value)\n )\n end\n children.push(\n create_aural_content_element(\n \" #{get_description_of_symbol(content[index..index])} \",\n data_property_value\n )\n )\n\n children.push(\n create_visual_content_element(\n content[index..index],\n data_property_value\n )\n )\n\n children\n end", "def op_tokenize (p_token, lineno, pos)\n\n\tcase p_token\n\twhen \"=\"\n\t\treturn Token.new(\"T_ASSIGNMENT\", p_token, lineno, pos)\n\twhen \"{\"\n\t\treturn Token.new(\"T_LBRACE\", p_token, lineno, pos)\n\twhen \"}\"\n\t\treturn Token.new(\"T_RBRACE\", p_token, lineno, pos)\n\twhen \"(\"\n\t\treturn Token.new(\"T_LPAREN\", p_token, lineno, pos)\n\twhen \")\"\n\t\treturn Token.new(\"T_RPAREN\", p_token, lineno, pos)\n\twhen \"\\\"\"\n\t\treturn Token.new(\"T_QUOTE\", p_token, lineno, pos)\n\twhen \"==\"\n\t\treturn Token.new(\"T_BOOLOP\", p_token, lineno, pos)\n\twhen \"!=\"\n\t\treturn 
Token.new(\"T_BOOLOP\", p_token, lineno, pos)\n\twhen \"+\"\n\t\treturn Token.new(\"T_PLUS\", p_token, lineno, pos)\n\twhen \"$\"\n\t\treturn Token.new(\"T_EOFSIGN\", p_token, lineno, pos)\n\telse\n\t\traise UnknownSymbolError.new(p_token, lineno, pos)\n\tend\nend", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def ends_with_punctuation?(text)\n text.chars.last.scan(/[[:punct:]]/u).length.positive?\n end", "def peek\n @tokens[@pos]\n end", "def speak_as_literal_punctuation_inherit(element)\n reverse_speak_as(element, 'literal-punctuation')\n reverse_speak_as(element, 'no-punctuation')\n\n isolate_text_node(element)\n\n visit(element, method(:speak_as_literal_punctuation))\n end", "def is_punctuation(letter)\n ascii = letter.ord\n ascii < 65 || (ascii > 90 && ascii < 97) || ascii > 122\nend", "def parse_unary_operation cur_tok\n\n\t\t# Check if UnaryOperation or a Term.\n\t\tif cur_tok and cur_tok.type == \"Operator\" and cur_tok.value == \"`\" and @tokens.peak and @tokens.peak.type == \"Identifier\"\n\t\t\tif @tokens.peak.value == \"i\"\n\t\t\t\[email protected]\n\t\t\t\tcur_ast = Term.new(cur_tok.line, cur_tok.col, imaginary: true) # No magnitude as default = 1\n\t\t\telse\n\t\t\t\tcur_ast = Term.new(cur_tok.line, cur_tok.col, literal_variable: @tokens.next.value) # No magnitude as default = 1\n\t\t\tend\n\t\telse\n\t\t\tcur_ast = UnaryOperation.new(cur_tok.line, cur_tok.col, parse_single_token(cur_tok), parse_next(true))\n\t\tend\n\n\t\t# Use look ahead to see if the UnaryOperation needs to be a child node of a different ast.\n\t\t# This is the case when is_operation is true.\n\t\tif is_operation 
\n\t\t\treturn parse_operation(cur_ast, true)\n\t\telse\n\t\t\treturn cur_ast \n\t\tend\n\tend", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def tokenize_operator(&block) # :yields: SQLTree::Token\n operator = current_char\n if operator == '-' && /[\\d\\.]/ =~ peek_char\n tokenize_number(&block)\n else\n operator << next_char if SQLTree::Token::OPERATORS_HASH.has_key?(operator + peek_char)\n operator_class = SQLTree::Token.const_get(SQLTree::Token::OPERATORS_HASH[operator].to_s.upcase)\n handle_token(operator_class.new(operator), &block)\n end\n end", "def peek # :nodoc:\n @tokens.peek\n end", "def peek\n @tokens[@position]\n end", "def punctuation\n @input = @input.gsub(/\\p{P}/u, '')\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def forward_word\n $multiplier = 1 if !$multiplier or $multiplier == 0\n line = @current_index\n buff = @list[line]\n pos = @curpos\n $multiplier.times {\n found = buff.index(/[[:punct:][:space:]]/, pos)\n if !found\n # if not found, we've lost a counter\n line += 1 # unless eof\n buff = @list[line]\n pos = 0\n else\n pos = found + 1\n end\n $log.debug \" forward_word: pos #{pos} line #{line} buff: #{buff}\"\n }\n @current_index = line\n @curpos = pos\n @buffer = @list[@current_index]\n set_form_row\n set_form_col pos\n @repaint_required = true\n end", "def is_punctuation?(str)\n %w(. , ? ! 
; :).include?(str)\n end", "def is_punctuation(token)\n _is_in(token, @punctuation_tags)\n end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def parse_next no_binary_operators=false\n\n\t\tcur_tok = @tokens.next\n\t\tif (not cur_tok) or cur_tok.type == \"EOF\" # TODO: Do I use the EOF Token at all?\n\t\t\tputs \"-- Line: #{__LINE__} -- Reached end of file (token stream returned nil).\"\n\t\t\tputs \" cur_tok: #{cur_tok}\"\n\t\t\tputs \" @tokens.peak: #{@tokens.peak}\"\n\t\t\treturn nil\n\t\tend\n\n\t\t# Parsing on current token\n\t\tif is_if_statement cur_tok # Keywords have highest priority \n\t\t\treturn parse_if_statement cur_tok\n\t\telsif is_lambda cur_tok \n\t\t\treturn parse_lambda cur_tok\n\t\telsif is_return_statement cur_tok\n\t\t\treturn parse_return_statement cur_tok\n\t\telsif is_tuple cur_tok\n\t\t\treturn parse_tuple cur_tok\n\t\telsif is_matrix cur_tok\n\t\t\treturn parse_matrix cur_tok\n\t\t\n\t\t# Look ahead parsing\n\t\telsif is_operation and not no_binary_operators # When parsing unary ops, don't parse binary ops.\n\t\t\treturn parse_operation cur_tok\n\t\telsif is_call\n\t\t\treturn parse_call cur_tok\n\t\t\n\t\t# Parsing on current token.\n\t\telsif is_unary_operation cur_tok\n\t\t\treturn parse_unary_operation cur_tok\n\t\t\n\t\telse\n\t\t\treturn parse_single_token cur_tok\n\t\tend\n\tend", "def get_punctuation(string)\n punctuation?(string) ? string.chars.last : ''\nend", "def _ParagraphDelimiter\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_BlockDelimiter)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_PreformattedCommandHead)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_LineBlock)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Newpage)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_HeadedStart)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_ParagraphDelimiter unless _tmp\n return _tmp\n end", "def is_punctuation?\n %w(. ,).include?(wordform)\n end", "def token\n ready_token\n\n i = @buffer.index(/[\\[\\]()<>{}\\s\\/]/) || @buffer.size\n\n token_chars =\n if i == 0 and @buffer[i,2] == \"<<\" then 2\n elsif i == 0 and @buffer[i,2] == \">>\" then 2\n elsif i == 0 then 1\n else i\n end\n\n strip_space = !(i == 0 and @buffer[0,1] == '(')\n tok = head(token_chars, strip_space)\n\n if tok == \"\"\n nil\n elsif tok[0,1] == \"%\"\n @buffer = \"\"\n token\n else\n tok\n end\n end", "def term\n eat_ws_and_comments\n if peek == \"'\" # quoted\n return quote_term\n elsif peek == \"`\" # quasiquote\n return quasi_term\n elsif peek == \",\"\n return unquote_term\n elsif str = string_term\n return str\n end\n # We begin by trying the various collections\n [:list_term, :array_term, :hash_term].each do |coll|\n coll = method coll\n value = coll.call\n return value unless value.nil?\n end\n # Then it must be a basic term\n naive_term\n end", "def punctuation(sentence, index=sentence.size-1)\r\n sentence[index, sentence.size-index]\r\nend", "def consume\n return nil if @s.eos?\n\n @s.mark\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string('\"')\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?\n create_token(:hash,\n :type => start_identifier? ? 
:id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string(\"'\")\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n # NON-STANDARD: IE * hack\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n elsif start_identifier?(char + @s.peek(2))\n @s.reconsume\n consume_ident\n elsif @s.peek(2) == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :/\n if @s.peek == '*'\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n text = @s.consume_rest\n end\n\n if @options[:preserve_comments]\n create_token(:comment, :value => text)\n else\n consume\n end\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?(char + @s.peek)\n @s.reconsume\n consume_ident\n else\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def test_operators_that_dont_belong_together\n tok = initialize_tokenizer( <<-EOD\n +*\n EOD\n )\n\n assert_equal('+', tok.next.token.value) \n assert_equal('*', tok.next.token.value) \n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text 
= @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def remove_punc_and_add_index(word)\n punc_collection = []\n word.chars.each_with_index do |char, index|\n if char.match?(/[',.\\-]/)\n punc_collection << [char, index]\n end\n end\n punc_collection\nend", "def consume\n return nil if @s.eos?\n\n @s.mark\n\n # Consume comments.\n if comment_token = consume_comments\n if @options[:preserve_comments]\n return comment_token\n else\n return consume\n end\n end\n\n # Consume whitespace.\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?(@s.peek(2))\n create_token(:hash,\n :type => start_identifier?(@s.peek(3)) ? :id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n # Non-standard: Preserve the IE * hack.\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n nextTwoChars = @s.peek(2)\n nextThreeChars = char + nextTwoChars\n\n if start_number?(nextThreeChars)\n @s.reconsume\n consume_numeric\n elsif nextTwoChars == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n elsif start_identifier?(nextThreeChars)\n @s.reconsume\n consume_ident\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?(@s.peek(3))\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?\n @s.reconsume\n consume_ident\n else\n # Parse error.\n create_token(:delim,\n :error => true,\n :value => char)\n 
end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def tokenize_float_literal\n advance # Pass the .\n\n until( /[0-9eE]/.match( cchar ).nil? )\n if cchar == 'e' || cchar == 'E'\n return tokenize_exponent_literal\n end\n advance\n end\n capture_token( :float_literal )\n end", "def cur_token_precedence\n filtered = @infix_parsers.select{|key, val| key.call @current_token}\n return 0 if filtered.size == 0\n _, contents = filtered.first\n contents[0]\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def peek_no_space\n return @tokens.first unless @tokens.first.class == TkSPACE\n @tokens[1]\n end", "def tokenize\n return consume_string if @state == :string\n\n trim_stream\n\n # Check if we're dealing with a keyword!\n return create_token(:keyword, consume_pattern(KEYWORD_DEF)) unless @stream.match(KEYWORD_DEF).nil?\n\n # Now we must check to see what else we could be finding. 
Remember whatever we\n # encounter here is the *start* of whatever token it is; a \" character here means\n # the start of a string..\n if @stream[0].match STRING_START_DEF\n @state = :string\n @partial_string['delimiter'] = @stream[0]\n consume\n\n return nil\n end\n\n return create_token(:terminator, consume) if @stream[0] == ';'\n return create_token(:operator, consume) if @stream[0] == '+'\n\n return create_token(:name, consume_pattern(NAME_DEF)) unless @stream.match(NAME_DEF).nil?\n\n raise_tokenizer_error \"Illegal character '#{@stream[0]}' - unable to form a token with this character!\"\n end", "def handle_parens_unary_minus(tokens)\n handled = []\n current_pos = 0\n minus_is_unary = true\n while current_pos < tokens.length\n# puts \"handled #{handled}, cp #{current_pos}, miu #{minus_is_unary}, tokens remain #{tokens[current_pos..-1]}\"\n case tokens[current_pos]\n when '('\n last_value, current_pos = eval_parens(tokens, current_pos)\n handled.push(last_value)\n minus_is_unary = false\n when '+', '-', '*', '/'\n if (minus_is_unary)\n minus_is_unary = false\n if tokens[current_pos] == '-'\n handled.push(-string_to_number(tokens[current_pos+1]))\n current_pos += 1\n else\n raise \"Oops, unexpected operator #{tokens[current_pos]}\"\n end\n else\n minus_is_unary = true\n handled.push(tokens[current_pos])\n end\n else\n handled.push(string_to_number(tokens[current_pos]))\n minus_is_unary = false\n end\n current_pos += 1\n end\n handled\nend", "def peek\n @tokens.at(@current)\n end", "def test_token(token_type, offset = 0)\n debug \"Testing for #{token_type} with offset #{offset}\", :verbose\n\n peeked = peek_token(offset)\n !peeked.nil? && peeked.type == token_type\n end", "def get_token\n\t\tt = Token.new\n\t\tcase @src[@lineno][@linepos]\n\t\t\twhen ' ' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\f' then #less likely to see this\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\t' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '\\v' then\n\t\t\t\tskip_whitespace\n\t\t\twhen '0'..'9' then\n\t\t\t\tt = parse_number\n\t\t\twhen 'A-Z' then\n\t\t\t\tt = parse_name\n\t\t\twhen 'a-z' then\n\t\t\t\tparse_name\n\t\t\twhen '_' then\n\t\t\t\tt = parse_name\n\t\t\twhen /[~!$%\\^&*()-+=|{}\\[\\]\\:;\\/?<>,.]/ then #very much check\n\t\t\t\tt = parse_operator\n\t\t\twhen '\"' then\n\t\t\t\tt = parse_string\n\t\tend\n\tend", "def next_token\n return process_string if lex_strterm\n self.cmd_state = self.command_start\n self.command_start = false\n self.space_seen = false # TODO: rename token_seen?\n self.last_state = lex_state\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/[\\ \\t\\r\\f\\v]/) then\n action { self.space_seen = true; next }\n when text = ss.scan(/\\n|\\#/) then\n process_newline_or_comment text\n when text = ss.scan(/[\\]\\)\\}]/) then\n process_brace_close text\n when ss.match?(/\\!/) then\n case\n when is_after_operator? 
&& (ss.skip(/\\!\\@/)) then\n action { result EXPR_ARG, :tUBANG, \"!@\" }\n when text = ss.scan(/\\![=~]?/) then\n action { result :arg_state, TOKENS[text], text }\n end # group /\\!/\n when ss.match?(/\\./) then\n case\n when text = ss.scan(/\\.\\.\\.?/) then\n action { result EXPR_BEG, TOKENS[text], text }\n when ss.skip(/\\.\\d/) then\n action { rb_compile_error \"no .<digit> floating literal anymore put 0 before dot\" }\n when ss.skip(/\\./) then\n action { self.lex_state = EXPR_BEG; result EXPR_DOT, :tDOT, \".\" }\n end # group /\\./\n when text = ss.scan(/\\(/) then\n process_paren text\n when text = ss.scan(/\\,/) then\n action { result EXPR_PAR, TOKENS[text], text }\n when ss.match?(/=/) then\n case\n when text = ss.scan(/\\=\\=\\=|\\=\\=|\\=~|\\=>|\\=(?!begin\\b)/) then\n action { result arg_state, TOKENS[text], text }\n when bol? && (text = ss.scan(/\\=begin(?=\\s)/)) then\n process_begin text\n when text = ss.scan(/\\=(?=begin\\b)/) then\n action { result arg_state, TOKENS[text], text }\n end # group /=/\n when ruby22_label? && (text = ss.scan(/\\\"#{SIMPLE_STRING}\\\":/o)) then\n process_label text\n when text = ss.scan(/\\\"(#{SIMPLE_STRING})\\\"/o) then\n action { result EXPR_END, :tSTRING, text[1..-2].gsub(ESC) { unescape $1 } }\n when text = ss.scan(/\\\"/) then\n action { string STR_DQUOTE; result nil, :tSTRING_BEG, text }\n when text = ss.scan(/\\@\\@?\\d/) then\n action { rb_compile_error \"`#{text}` is not allowed as a variable name\" }\n when text = ss.scan(/\\@\\@?#{IDENT_CHAR}+/o) then\n process_ivar text\n when ss.match?(/:/) then\n case\n when not_end? && (text = ss.scan(/:([a-zA-Z_]#{IDENT_CHAR}*(?:[?]|[!](?!=)|=(?==>)|=(?![=>]))?)/o)) then\n process_symbol text\n when not_end? && (text = ss.scan(/\\:\\\"(#{SIMPLE_STRING})\\\"/o)) then\n process_symbol text\n when not_end? && (text = ss.scan(/\\:\\'(#{SSTRING})\\'/o)) then\n process_symbol text\n when text = ss.scan(/\\:\\:/) then\n process_colon2 text\n when text = ss.scan(/\\:/) then\n process_colon1 text\n end # group /:/\n when ss.skip(/->/) then\n action { result EXPR_ENDFN, :tLAMBDA, nil }\n when text = ss.scan(/[+-]/) then\n process_plus_minus text\n when ss.match?(/[+\\d]/) then\n case\n when ss.skip(/#{NUM_BAD}/o) then\n action { rb_compile_error \"Invalid numeric format\" }\n when ss.skip(/#{INT_DEC}/o) then\n action { int_with_base 10 }\n when ss.skip(/#{INT_HEX}/o) then\n action { int_with_base 16 }\n when ss.skip(/#{INT_BIN}/o) then\n action { int_with_base 2 }\n when ss.skip(/#{INT_OCT_BAD}/o) then\n action { rb_compile_error \"Illegal octal digit.\" }\n when ss.skip(/#{INT_OCT}/o) then\n action { int_with_base 8 }\n when ss.skip(/#{FLOAT_BAD}/o) then\n action { rb_compile_error \"Trailing '_' in number.\" }\n when text = ss.scan(/#{FLOAT}/o) then\n process_float text\n when ss.skip(/#{INT_DEC2}/o) then\n action { int_with_base 10 }\n when ss.skip(/[0-9]/) then\n action { rb_compile_error \"Bad number format\" }\n end # group /[+\\d]/\n when text = ss.scan(/\\[/) then\n process_square_bracket text\n when was_label? && (text = ss.scan(/\\'#{SSTRING}\\':?/o)) then\n process_label_or_string text\n when ss.match?(/\\|/) then\n case\n when ss.skip(/\\|\\|\\=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"||\" }\n when ss.skip(/\\|\\|/) then\n action { result EXPR_BEG, :tOROP, \"||\" }\n when ss.skip(/\\|\\=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"|\" }\n when ss.skip(/\\|/) then\n action { state = is_after_operator? ? 
EXPR_ARG : EXPR_PAR; result state, :tPIPE, \"|\" }\n end # group /\\|/\n when text = ss.scan(/\\{/) then\n process_brace_open text\n when ss.match?(/\\*/) then\n case\n when ss.skip(/\\*\\*=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"**\" }\n when ss.skip(/\\*\\*/) then\n action { result(:arg_state, space_vs_beginning(:tDSTAR, :tDSTAR, :tPOW), \"**\") }\n when ss.skip(/\\*\\=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"*\") }\n when ss.skip(/\\*/) then\n action { result(:arg_state, space_vs_beginning(:tSTAR, :tSTAR, :tSTAR2), \"*\") }\n end # group /\\*/\n when ss.match?(/</) then\n case\n when ss.skip(/\\<\\=\\>/) then\n action { result :arg_state, :tCMP, \"<=>\" }\n when ss.skip(/\\<\\=/) then\n action { result :arg_state, :tLEQ, \"<=\" }\n when ss.skip(/\\<\\<\\=/) then\n action { result EXPR_BEG, :tOP_ASGN, \"<<\" }\n when text = ss.scan(/\\<\\</) then\n process_lchevron text\n when ss.skip(/\\</) then\n action { result :arg_state, :tLT, \"<\" }\n end # group /</\n when ss.match?(/>/) then\n case\n when ss.skip(/\\>\\=/) then\n action { result :arg_state, :tGEQ, \">=\" }\n when ss.skip(/\\>\\>=/) then\n action { result EXPR_BEG, :tOP_ASGN, \">>\" }\n when ss.skip(/\\>\\>/) then\n action { result :arg_state, :tRSHFT, \">>\" }\n when ss.skip(/\\>/) then\n action { result :arg_state, :tGT, \">\" }\n end # group />/\n when ss.match?(/\\`/) then\n case\n when expr_fname? && (ss.skip(/\\`/)) then\n action { result(EXPR_END, :tBACK_REF2, \"`\") }\n when expr_dot? && (ss.skip(/\\`/)) then\n action { result((cmd_state ? EXPR_CMDARG : EXPR_ARG), :tBACK_REF2, \"`\") }\n when ss.skip(/\\`/) then\n action { string STR_XQUOTE, '`'; result(nil, :tXSTRING_BEG, \"`\") }\n end # group /\\`/\n when text = ss.scan(/\\?/) then\n process_questionmark text\n when ss.match?(/&/) then\n case\n when ss.skip(/\\&\\&\\=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"&&\") }\n when ss.skip(/\\&\\&/) then\n action { result(EXPR_BEG, :tANDOP, \"&&\") }\n when ss.skip(/\\&\\=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"&\" ) }\n when ss.skip(/\\&\\./) then\n action { result(EXPR_DOT, :tLONELY, \"&.\") }\n when text = ss.scan(/\\&/) then\n process_amper text\n end # group /&/\n when text = ss.scan(/\\//) then\n process_slash text\n when ss.match?(/\\^/) then\n case\n when ss.skip(/\\^=/) then\n action { result(EXPR_BEG, :tOP_ASGN, \"^\") }\n when ss.skip(/\\^/) then\n action { result(:arg_state, :tCARET, \"^\") }\n end # group /\\^/\n when ss.skip(/\\;/) then\n action { self.command_start = true; result(EXPR_BEG, :tSEMI, \";\") }\n when ss.match?(/~/) then\n case\n when is_after_operator? && (ss.skip(/\\~@/)) then\n action { result(:arg_state, :tTILDE, \"~\") }\n when ss.skip(/\\~/) then\n action { result(:arg_state, :tTILDE, \"~\") }\n end # group /~/\n when ss.match?(/\\\\/) then\n case\n when ss.skip(/\\\\\\r?\\n/) then\n action { self.lineno += 1; self.space_seen = true; next }\n when ss.skip(/\\\\/) then\n action { rb_compile_error \"bare backslash only allowed before newline\" }\n end # group /\\\\/\n when text = ss.scan(/\\%/) then\n process_percent text\n when ss.match?(/\\$/) then\n case\n when text = ss.scan(/\\$_\\w+/) then\n process_gvar text\n when text = ss.scan(/\\$_/) then\n process_gvar text\n when text = ss.scan(/\\$[~*$?!@\\/\\\\;,.=:<>\\\"]|\\$-\\w?/) then\n process_gvar text\n when in_fname? && (text = ss.scan(/\\$([\\&\\`\\'\\+])/)) then\n process_gvar text\n when text = ss.scan(/\\$([\\&\\`\\'\\+])/) then\n process_backref text\n when in_fname? 
&& (text = ss.scan(/\\$([1-9]\\d*)/)) then\n process_gvar text\n when text = ss.scan(/\\$([1-9]\\d*)/) then\n process_nthref text\n when text = ss.scan(/\\$0/) then\n process_gvar text\n when text = ss.scan(/\\$[^[:ascii:]]+/) then\n process_gvar text\n when text = ss.scan(/\\$\\W|\\$\\z/) then\n process_gvar_oddity text\n when text = ss.scan(/\\$\\w+/) then\n process_gvar text\n end # group /\\$/\n when text = ss.scan(/\\_/) then\n process_underscore text\n when text = ss.scan(/#{IDENT}/o) then\n process_token text\n when ss.skip(/\\004|\\032|\\000|\\Z/) then\n action { [RubyLexer::EOF, RubyLexer::EOF] }\n when text = ss.scan(/./) then\n action { rb_compile_error \"Invalid char #{text.inspect} in expression\" }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def token_terminator\n %r{(?:(?=[[:space:]])|$)}\n end", "def get_tokenize\n @lexeme = nil\n loop {\n case @string\n\t when nil\t\t# the end\n\t @sym = nil\n\t break\n when \"\" # the end\n @sym = nil\n break\n when /\\A[\\r\\n\\t ]+/m\t# skip whitespace\n @string = $'\n when /\\A\\(/m # skip comment\n comment\n when /\\A\"\"/ # skip empty quoted text\n @string = $'\n when /\\A[\\w!$%&\\'*+\\/=?^\\`{\\}|~#-]+/m\n @string = $'\n @sym = SYM_ATOM\n break\n when /\\A\"(.*?([^\\\\]|\\\\\\\\))\"/m\n @string = $'\n @sym = SYM_QTEXT\n @lexeme = $1.gsub(/\\\\(.)/, '\\1')\n break\n when /\\A</\n @string = $'\n @sym = SYM_LESS_THAN\n break\n when /\\A>/\n @string = $'\n @sym = SYM_GREATER_THAN\n break\n when /\\A@/\n @string = $'\n @sym = SYM_AT_SIGN\n break\n when /\\A,/\n @string = $'\n @sym = SYM_COMMA\n break\n when /\\A:/\n @string = $'\n @sym = SYM_COLON\n break\n when /\\A;/\n @string = $'\n @sym = SYM_SEMI_COLON\n break\n when /\\A\\./\n @string = $'\n @sym = SYM_PERIOD\n break\n\t when /\\A(\\[.*?([^\\\\]|\\\\\\\\)\\])/m\n\t @string = $'\n\t @sym = SYM_DOMAIN_LITERAL\n\t @lexeme = $1.gsub(/(^|[^\\\\])[\\r\\n\\t ]+/, '\\1').gsub(/\\\\(.)/, '\\1')\n\t break\n when /\\A[\\200-\\377\\w!$%&\\'*+\\/=?^\\`{\\}|~#-]+/nm\n # This is just like SYM_ATOM, but includes all characters\n # with high bits. 
This is so we can allow such tokens in\n # the display name portion of an address even though it\n # violates the RFCs.\n @string = $'\n @sym = SYM_ATOM_NON_ASCII\n break\n when /\\A./\n @string = $'\t# garbage\n\t error('garbage character in string')\n else\n raise \"internal error, @string is #{@string.inspect}\"\n end\n }\n if @sym\n @lexeme ||= $&\n end\n end", "def peek\n @tok ||= read_token\n end", "def Primario\n if @token.get_tipo == \"num\" then\n t, msg = le(\"num\")\n if t\n return Hash[\"tag\" => \"num\", \"1\" => t.get_lexama.to_f]\n else\n return nil, msg\n end\n elsif @token.get_tipo == \"(\" then\n t, msg = le(\"(\")\n if t then\n exp1, msg = self.Exp\n if exp1 then\n t, msg = le(\")\")\n if t then\n return exp1\n else\n return nil, msg \n end\n else\n return nil, msg \n end\n else\n return nil, msg \n end \n end\n\n return nil, \"Token inesperado, esperava num ou (, encontrou #{@token.get_tipo}\"\n end", "def parse_single_token cur_tok\n\n\t\tcase cur_tok.type\n\t\twhen \"Digit\"\n\t\t\tif cur_tok.value.include? \".\"\n\t\t\t\treturn Term.new(cur_tok.line, cur_tok.col, magnitude: cur_tok.value.to_f)\n\t\t\telse\n\t\t\t\treturn Term.new(cur_tok.line, cur_tok.col, magnitude: cur_tok.value.to_i)\n\t\t\tend\n\t\twhen \"Identifier\"\n\t\t\treturn Reference.new(cur_tok.line, cur_tok.col, cur_tok.value)\n\t\twhen \"Keyword\"\n\t\t\tif cur_tok.value == \"true\" or cur_tok.value == \"false\"\n\t\t\t\treturn Boolean.new(cur_tok.line, cur_tok.col, cur_tok.value == \"true\")\n\t\t\telse\n\t\t\t\tthrow_error(\"Misplaced keyword.\", cur_tok)\n\t\t\tend\n\t\twhen \"Operator\"\n\t\t\treturn Operator.new(cur_tok.line, cur_tok.col, cur_tok.value)\n\t\twhen \"Punctuation\"\n\t\t\tthrow_error(\"Misplaced Punctuation.\", cur_tok)\n\t\twhen \"String\"\n\t\t\tthrow_error(\"Strings are not implemented in this parser.\", cur_tok)\n\t\twhen \"EOF\"\n\t\t\tthrow_error(\"EOF occured when parsing single token (the lexer & parser aren't talking to each other properly).\", cur_tok)\n\t\telse\n\t\t\tthrow_error(\"parse_single_token failed to identify the type of the token (the lexer & parser aren't talking to each other properly).\", cur_tok)\n\t\tend\n\tend", "def decimal_literal\n pos0 = @pos\n code = @codes[@pos]\n\n if code.nil?\n return nil\n elsif code == 0x2e #.\n @pos += 1\n f = decimal_digits\n if f.nil? 
#=> this period is punctuator\n @pos = pos0 + 1\n return ECMA262::PUNC_PERIOD\n end\n if (code = @codes[@pos]) == 0x65 || code == 0x45\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new('0', f, e)\n elsif code == 0x30 # zero\n i = \"0\"\n @pos += 1\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n elsif code >= 0x31 and code <= 0x39\n i = decimal_digits\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n end\n\n nil\n end", "def parse_factor\n case current\n when :L_PARANTH then\n expect(:L_PARANTH)\n exp = parse_expression\n expect(:R_PARANTH)\n\n exp\n when :ID then\n parse_variable_exp\n when :KW_NEW then\n parse_class_init\n else\n parse_constant\n end\n end", "def is_word?\n not is_punctuation?\n end", "def isToken(c)\r\n\tvalidTokens = [\"(\", \")\", \"+\", \"-\", \"*\",\r\n\t \"/\", \"^\", \"cos\", \"sin\", \"tan\", \"ln\", \"atan\", \r\n\t \"atan2\", \",\"].include?(c)\r\nend", "def literal_char!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 43)\n\n \n # - - - - main rule block - - - -\n # at line 491:2: ( ESC | ~ ( '\\\\'' | '\\\\\\\\' ) )\n alt_6 = 2\n look_6_0 = @input.peek(1)\n\n if (look_6_0 == ?\\\\) \n alt_6 = 1\n elsif (look_6_0.between?(0x0000, ?&) || look_6_0.between?(?(, ?[) || look_6_0.between?(?], 0xFFFF)) \n alt_6 = 2\n else\n nvae = NoViableAlternative(\"\", 6, 0)\n raise nvae\n end\n case alt_6\n when 1\n # at line 491:4: ESC\n esc!\n\n when 2\n # at line 492:4: ~ ( '\\\\'' | '\\\\\\\\' )\n if @input.peek(1).between?(0x0000, ?&) || @input.peek(1).between?(?(, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 43)\n\n end", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def scan_for_commas(token); end", "def parse\n length = @chars.length\n @index = 0\n while @index < length\n @tokens << if custom?\n parse_custom\n else\n parse_char\n end\n @index += 1\n end\n @tokens\n end", "def peek_token\n token = @tokens.first || []\n p :peek => token if @debug\n token\n end", "def peek_token\n token = @tokens.first || []\n p :peek => token if @debug\n token\n end", "def handle_naive_term(term, file, line, col)\n # First check to see if it's a number\n value = number? 
term\n return Token.new value, file, line, col if value\n # Ok, so it must be a symbol, a keyword, a literal or an identifier.\n if @literals.has_key?(s=term.to_sym) # special keyword\n T(file, line, col, @literals[s])\n elsif term[0] == \":\" # Symbol\n error file, line, col, \": is not a valid symbol\", term.size == 1\n A(file, line, col, :quote, term[1..-1].to_sym)\n elsif term[-1] == \":\" # Keyword\n A(file, line, col, :keyword, term.to_sym)\n else # identifier\n T(file, line, col, term.to_sym)\n end\n end", "def category_term\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 3 )\n value = nil\n term10 = nil\n\n begin\n # at line 68:37: ( WS )? term\n # at line 68:37: ( WS )?\n alt_7 = 2\n look_7_0 = @input.peek(1)\n\n if (look_7_0 == WS)\n alt_7 = 1\n end\n case alt_7\n when 1\n # at line 68:37: WS\n match(WS, TOKENS_FOLLOWING_WS_IN_category_term_131)\n\n end\n @state.following.push(TOKENS_FOLLOWING_term_IN_category_term_134)\n term10 = term\n @state.following.pop\n # --> action\n value = (term10 && @input.to_s(term10.start, term10.stop))\n # <-- action\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 3 )\n\n end\n\n return value\n end", "def tokenize(content, ptype = T.unsafe(nil)); end", "def xxxtest_a_symbol_is_not_an_operator\n tok = initialize_tokenizer( <<-EOD\n #\n EOD\n )\n assert_not_equal(Token::OP, tok.next.token.type)\n end", "def factor\n if @tokens[@i].type == :lparn\n @i += 1 # lparn\n ret = expr\n @i += 1 # rparn\n elsif @tokens[@i].type == :iden\n ret = iden\n else\n ret = num\n end\n ret\n end", "def text_token(text, kind); end", "def text_token(text, kind); end", "def term\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 21 )\n return_value = TermReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n begin\n # at line 137:10: LOALPHA ( LOALPHA | DIGIT | '-' | '_' )*\n match(LOALPHA, TOKENS_FOLLOWING_LOALPHA_IN_term_876)\n # at line 137:18: ( LOALPHA | DIGIT | '-' | '_' )*\n while true # decision 33\n alt_33 = 2\n look_33_0 = @input.peek(1)\n\n if (look_33_0 == LOALPHA || look_33_0 == DIGIT || look_33_0 == T__30 || look_33_0 == T__39)\n alt_33 = 1\n\n end\n case alt_33\n when 1\n # at line\n if @input.peek(1) == LOALPHA || @input.peek(1) == DIGIT || @input.peek(1) == T__30 || @input.peek(1) == T__39\n @input.consume\n @state.error_recovery = false\n else\n mse = MismatchedSet(nil)\n raise mse\n end\n\n\n else\n break # out of loop for decision 33\n end\n end # loop for decision 33\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look(-1)\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 21 )\n\n end\n\n return return_value\n end", "def category_term\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 3 )\n value = nil\n term10 = nil\n\n begin\n # at line 41:37: ( WS )? 
term\n # at line 41:37: ( WS )?\n alt_7 = 2\n look_7_0 = @input.peek( 1 )\n\n if ( look_7_0 == WS )\n alt_7 = 1\n end\n case alt_7\n when 1\n # at line 41:37: WS\n match( WS, TOKENS_FOLLOWING_WS_IN_category_term_128 )\n\n end\n @state.following.push( TOKENS_FOLLOWING_term_IN_category_term_131 )\n term10 = term\n @state.following.pop\n # --> action\n value = ( term10 && @input.to_s( term10.start, term10.stop ) ) \n # <-- action\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 3 )\n\n end\n \n return value\n end", "def term\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 21 )\n return_value = TermReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n begin\n # at line 111:10: ( LOALPHA | reserved_words ) ( LOALPHA | DIGIT | DASH | UNDERSCORE | DOT | reserved_words )*\n # at line 111:10: ( LOALPHA | reserved_words )\n alt_32 = 2\n look_32_0 = @input.peek( 1 )\n\n if ( look_32_0 == LOALPHA )\n alt_32 = 1\n elsif ( look_32_0 == SCHEME || look_32_0.between?( CLASS, ACTIONS ) || look_32_0.between?( SELF, CATEGORY ) || look_32_0.between?( KIND, ACTION ) || look_32_0.between?( LINK, TERM ) )\n alt_32 = 2\n else\n raise NoViableAlternative( \"\", 32, 0 )\n end\n case alt_32\n when 1\n # at line 111:12: LOALPHA\n match( LOALPHA, TOKENS_FOLLOWING_LOALPHA_IN_term_923 )\n\n when 2\n # at line 111:22: reserved_words\n @state.following.push( TOKENS_FOLLOWING_reserved_words_IN_term_927 )\n reserved_words\n @state.following.pop\n\n end\n # at line 111:39: ( LOALPHA | DIGIT | DASH | UNDERSCORE | DOT | reserved_words )*\n while true # decision 33\n alt_33 = 7\n case look_33 = @input.peek( 1 )\n when LOALPHA then alt_33 = 1\n when DIGIT then alt_33 = 2\n when DASH then alt_33 = 3\n when UNDERSCORE then alt_33 = 4\n when DOT then alt_33 = 5\n when SCHEME, CLASS, TITLE, REL, LOCATION, ATTRIBUTES, ACTIONS, SELF, CATEGORY, KIND, MIXIN, ACTION, LINK, TERM then alt_33 = 6\n end\n case alt_33\n when 1\n # at line 111:41: LOALPHA\n match( LOALPHA, TOKENS_FOLLOWING_LOALPHA_IN_term_933 )\n\n when 2\n # at line 111:51: DIGIT\n match( DIGIT, TOKENS_FOLLOWING_DIGIT_IN_term_937 )\n\n when 3\n # at line 111:59: DASH\n match( DASH, TOKENS_FOLLOWING_DASH_IN_term_941 )\n\n when 4\n # at line 111:66: UNDERSCORE\n match( UNDERSCORE, TOKENS_FOLLOWING_UNDERSCORE_IN_term_945 )\n\n when 5\n # at line 111:79: DOT\n match( DOT, TOKENS_FOLLOWING_DOT_IN_term_949 )\n\n when 6\n # at line 111:85: reserved_words\n @state.following.push( TOKENS_FOLLOWING_reserved_words_IN_term_953 )\n reserved_words\n @state.following.pop\n\n else\n break # out of loop for decision 33\n end\n end # loop for decision 33\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 21 )\n\n end\n \n return return_value\n end", "def next_token; end", "def next_token\n return if @scanner.eos?\n\n if @scanner.scan(SKIP_PATTERN)\n @column += @scanner[:before].length\n\n new_lines = @scanner[:new_line].delete(\"\\r\")\n unless new_lines.empty?\n @lineno += new_lines.length\n @column = 0\n end\n\n @column += @scanner[:after].length\n end\n\n token =\n case\n when try_match(REFERENCE_PATTERN)\n Token.new :REFERENCE, 
@scanner[:identifier], @lineno, @column\n when try_match(PATH_PATTERN)\n Token.new :PATH, @scanner[:identifier], @lineno, @column\n when try_match(FILTER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :FILTER, \"?\", @lineno, @column\n when try_match(OPEN_BRACKET_PATTERN)\n @state_stack.push Token.new :OPEN_BRACKET, \"[\", @lineno, @column\n @state_stack.last\n when try_match(OPEN_PAREN_PATTERN)\n @state_stack.push Token.new :OPEN_PAREN, \"(\", @lineno, @column\n @state_stack.last\n when try_match(CLOSE_BRACKET_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\"]\", @lineno, @column)\n end\n unless last.type == :OPEN_BRACKET\n raise TokenizeError.unbalanced(\"[\", last.lineno, last.column)\n end\n Token.new :CLOSE_BRACKET, \"]\", @lineno, @column\n when try_match(CLOSE_PAREN_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\")\", @lineno, @column)\n end\n unless last.type == :OPEN_PAREN\n raise TokenizeError.unbalanced(\"(\", last.lineno, last.column)\n end\n Token.new :CLOSE_PAREN, \")\", @lineno, @column\n when try_match(SELF_PATTERN)\n Token.new :SELF, \"@\", @lineno, @column\n when try_match(NUMBER_PATTERN)\n Token.new :NUMBER, BigDecimal.new(@last_captured), @lineno, @column\n when try_match(STRING_PATTERN)\n Token.new :STRING, @scanner[:str], @lineno, @column\n when try_match(TRUE_PATTERN)\n Token.new :BOOLEAN, true, @lineno, @column\n when try_match(FALSE_PATTERN)\n Token.new :BOOLEAN, false, @lineno, @column\n when try_match(COLON_PATTERN)\n Token.new :COLON, \":\", @lineno, @column\n when try_match(COMMA_PATTERN)\n Token.new :COMMA, \",\", @lineno, @column\n when try_match(ADD_PATTERN)\n Token.new :ADD, \"+\", @lineno, @column\n when try_match(SUBTRACT_PATTERN)\n case @tokens.last&.type\n when nil, :OPEN_PAREN, :OPEN_BRACKET, :COMMA, :COLON, :POW, :MOD, :ADD, :SUBTRACT, :MULTIPLY, :DIVIDE\n if @scanner.check(NUMBER_PATTERN) ||\n @scanner.check(REFERENCE_PATTERN) ||\n @scanner.check(SUBTRACT_PATTERN) ||\n @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :UMINUS, \"-\", @lineno, @column\n else\n raise TokenizeError.unexpected(\"-\", @lineno, @column)\n end\n else\n Token.new :SUBTRACT, \"-\", @lineno, @column\n end\n when try_match(MULTIPLY_PATTERN)\n Token.new :MULTIPLY, \"*\", @lineno, @column\n when try_match(DIVIDE_PATTERN)\n Token.new :DIVIDE, \"/\", @lineno, @column\n when try_match(POW_PATTERN)\n Token.new :POW, \"^\", @lineno, @column\n when try_match(MOD_PATTERN)\n Token.new :MOD, \"%\", @lineno, @column\n when try_match(EQUAL_TO_PATTERN)\n Token.new :EQUAL_TO, \"==\", @lineno, @column\n when try_match(NOT_EQUAL_TO_PATTERN)\n Token.new :NOT_EQUAL_TO, \"!=\", @lineno, @column\n when try_match(GREATER_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :GREATER_THAN_OR_EQUAL_TO, \">=\", @lineno, @column\n when try_match(GREATER_THAN_PATTERN)\n Token.new :GREATER_THAN, \">\", @lineno, @column\n when try_match(LESS_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :LESS_THAN_OR_EQUAL_TO, \"<=\", @lineno, @column\n when try_match(LESS_THAN_PATTERN)\n Token.new :LESS_THAN, \"<\", @lineno, @column\n when try_match(AND_PATTERN)\n Token.new :AND, \"&&\", @lineno, @column\n when try_match(OR_PATTERN)\n Token.new :OR, \"||\", @lineno, @column\n when try_match(NOT_PATTERN)\n Token.new :NOT, \"!\", @lineno, @column\n when try_match(INTERSECT_PATTERN)\n Token.new :INTERSECT, \"&\", @lineno, @column\n when try_match(UNION_PATTERN)\n Token.new :UNION, \"|\", @lineno, @column\n when try_match(IDENTIFIER_PATTERN) && 
@scanner.check(OPEN_PAREN_PATTERN)\n unless @scanner.check(OPEN_PAREN_PATTERN)\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n Token.new :FUNCTION, @last_captured, @lineno, @column\n else\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n\n @column += @last_captured.length\n @tokens << token\n\n token\n end", "def lex\n @index += 1\n while lexer.tokens[@index] === :COMMENT\n @index += 1\n end\n lexer.tokens[@index] or unexpected_error(:EOF)\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def ppp?(morpheme)\n morpheme[:pos] == '助詞'\n end", "def lexeme_delimiter?(pos)\n @line[pos] == '!' || @line[pos] == ',' ||\n @line[pos] == \"\\n\" || space?(@line[pos]) ||\n @line[pos] == '…' || @line[pos, 3] == '...'\n end", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def unary\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 60 )\n return_value = UnaryReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n string_literal272 = nil\n string_literal274 = nil\n string_literal276 = nil\n string_literal278 = nil\n string_literal280 = nil\n string_literal282 = nil\n string_literal284 = nil\n char_literal286 = nil\n char_literal288 = nil\n char_literal290 = nil\n char_literal292 = nil\n unary273 = nil\n unary275 = nil\n unary277 = nil\n unary279 = nil\n unary281 = nil\n unary283 = nil\n unary285 = nil\n unary287 = nil\n unary289 = nil\n unary291 = nil\n unary293 = nil\n postfix294 = nil\n\n tree_for_string_literal272 = nil\n tree_for_string_literal274 = nil\n tree_for_string_literal276 = nil\n tree_for_string_literal278 = nil\n tree_for_string_literal280 = nil\n tree_for_string_literal282 = nil\n tree_for_string_literal284 = nil\n tree_for_char_literal286 = nil\n tree_for_char_literal288 = nil\n tree_for_char_literal290 = nil\n tree_for_char_literal292 = nil\n stream_DECR = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token DECR\" )\n stream_PLUS = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token PLUS\" )\n stream_IS_DEFINED = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token IS_DEFINED\" )\n stream_VOID = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token VOID\" )\n stream_DELETE = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token DELETE\" )\n stream_NOT = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token NOT\" )\n stream_MINUS = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token MINUS\" )\n stream_INCR = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token INCR\" )\n stream_TYPEOF = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token TYPEOF\" )\n stream_IS_UNDEFINED = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token IS_UNDEFINED\" )\n stream_TILDE = ANTLR3::AST::RewriteRuleTokenStream.new( @adaptor, \"token TILDE\" )\n stream_unary = ANTLR3::AST::RewriteRuleSubtreeStream.new( @adaptor, \"rule unary\" )\n stream_postfix = ANTLR3::AST::RewriteRuleSubtreeStream.new( @adaptor, \"rule postfix\" )\n begin\n # at line 634:3: ( 'delete' unary -> ^( 'delete' unary ) | 'void' unary -> ^( 'void' unary ) | 'typeof' unary -> ^( 'typeof' unary ) | 'defined?' unary -> ^( 'defined?' unary ) | 'undefined?' unary -> ^( 'undefined?' 
unary ) | '++' unary -> ^( '++' unary ) | '--' unary -> ^( '--' unary ) | '+' unary -> ^( UPLUS[ '+' ] unary ) | '-' unary -> ^( UMINUS[ '-' ] unary ) | '~' unary -> ^( '~' unary ) | '!' unary -> ^( '!' unary ) | postfix -> postfix )\n alt_67 = 12\n alt_67 = @dfa67.predict( @input )\n case alt_67\n when 1\n # at line 634:5: 'delete' unary\n string_literal272 = match( DELETE, TOKENS_FOLLOWING_DELETE_IN_unary_4218 )\n if @state.backtracking == 0\n stream_DELETE.add( string_literal272 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4224 )\n unary273 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary273.tree )\n end\n # AST Rewrite\n # elements: unary, DELETE\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 634:24: -> ^( 'delete' unary )\n # at line 634:27: ^( 'delete' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_DELETE.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 2\n # at line 635:5: 'void' unary\n string_literal274 = match( VOID, TOKENS_FOLLOWING_VOID_IN_unary_4240 )\n if @state.backtracking == 0\n stream_VOID.add( string_literal274 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4248 )\n unary275 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary275.tree )\n end\n # AST Rewrite\n # elements: VOID, unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 635:24: -> ^( 'void' unary )\n # at line 635:27: ^( 'void' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_VOID.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 3\n # at line 636:5: 'typeof' unary\n string_literal276 = match( TYPEOF, TOKENS_FOLLOWING_TYPEOF_IN_unary_4264 )\n if @state.backtracking == 0\n stream_TYPEOF.add( string_literal276 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4270 )\n unary277 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary277.tree )\n end\n # AST Rewrite\n # elements: unary, TYPEOF\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? 
subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 636:24: -> ^( 'typeof' unary )\n # at line 636:27: ^( 'typeof' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_TYPEOF.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 4\n # at line 637:5: 'defined?' unary\n string_literal278 = match( IS_DEFINED, TOKENS_FOLLOWING_IS_DEFINED_IN_unary_4286 )\n if @state.backtracking == 0\n stream_IS_DEFINED.add( string_literal278 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4290 )\n unary279 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary279.tree )\n end\n # AST Rewrite\n # elements: unary, IS_DEFINED\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 637:24: -> ^( 'defined?' unary )\n # at line 637:27: ^( 'defined?' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_IS_DEFINED.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 5\n # at line 638:5: 'undefined?' unary\n string_literal280 = match( IS_UNDEFINED, TOKENS_FOLLOWING_IS_UNDEFINED_IN_unary_4306 )\n if @state.backtracking == 0\n stream_IS_UNDEFINED.add( string_literal280 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4308 )\n unary281 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary281.tree )\n end\n # AST Rewrite\n # elements: IS_UNDEFINED, unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 638:24: -> ^( 'undefined?' unary )\n # at line 638:27: ^( 'undefined?' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_IS_UNDEFINED.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 6\n # at line 639:5: '++' unary\n string_literal282 = match( INCR, TOKENS_FOLLOWING_INCR_IN_unary_4324 )\n if @state.backtracking == 0\n stream_INCR.add( string_literal282 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4334 )\n unary283 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary283.tree )\n end\n # AST Rewrite\n # elements: INCR, unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? 
subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 639:24: -> ^( '++' unary )\n # at line 639:27: ^( '++' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_INCR.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 7\n # at line 640:5: '--' unary\n string_literal284 = match( DECR, TOKENS_FOLLOWING_DECR_IN_unary_4350 )\n if @state.backtracking == 0\n stream_DECR.add( string_literal284 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4360 )\n unary285 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary285.tree )\n end\n # AST Rewrite\n # elements: DECR, unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 640:24: -> ^( '--' unary )\n # at line 640:27: ^( '--' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_DECR.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 8\n # at line 641:5: '+' unary\n char_literal286 = match( PLUS, TOKENS_FOLLOWING_PLUS_IN_unary_4376 )\n if @state.backtracking == 0\n stream_PLUS.add( char_literal286 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4387 )\n unary287 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary287.tree )\n end\n # AST Rewrite\n # elements: unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 641:24: -> ^( UPLUS[ '+' ] unary )\n # at line 641:27: ^( UPLUS[ '+' ] unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( @adaptor.create( UPLUS, '+' ), root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 9\n # at line 642:5: '-' unary\n char_literal288 = match( MINUS, TOKENS_FOLLOWING_MINUS_IN_unary_4404 )\n if @state.backtracking == 0\n stream_MINUS.add( char_literal288 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4415 )\n unary289 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary289.tree )\n end\n # AST Rewrite\n # elements: unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? 
subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 642:24: -> ^( UMINUS[ '-' ] unary )\n # at line 642:27: ^( UMINUS[ '-' ] unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( @adaptor.create( UMINUS, '-' ), root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 10\n # at line 643:5: '~' unary\n char_literal290 = match( TILDE, TOKENS_FOLLOWING_TILDE_IN_unary_4432 )\n if @state.backtracking == 0\n stream_TILDE.add( char_literal290 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4443 )\n unary291 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary291.tree )\n end\n # AST Rewrite\n # elements: TILDE, unary\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 643:24: -> ^( '~' unary )\n # at line 643:27: ^( '~' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_TILDE.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 11\n # at line 644:5: '!' unary\n char_literal292 = match( NOT, TOKENS_FOLLOWING_NOT_IN_unary_4459 )\n if @state.backtracking == 0\n stream_NOT.add( char_literal292 )\n end\n @state.following.push( TOKENS_FOLLOWING_unary_IN_unary_4470 )\n unary293 = unary\n @state.following.pop\n if @state.backtracking == 0\n stream_unary.add( unary293.tree )\n end\n # AST Rewrite\n # elements: unary, NOT\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 644:24: -> ^( '!' unary )\n # at line 644:27: ^( '!' unary )\n root_1 = @adaptor.create_flat_list\n root_1 = @adaptor.become_root( stream_NOT.next_node, root_1 )\n\n @adaptor.add_child( root_1, stream_unary.next_tree )\n\n @adaptor.add_child( root_0, root_1 )\n\n\n\n return_value.tree = root_0\n\n end\n when 12\n # at line 645:5: postfix\n @state.following.push( TOKENS_FOLLOWING_postfix_IN_unary_4486 )\n postfix294 = postfix\n @state.following.pop\n if @state.backtracking == 0\n stream_postfix.add( postfix294.tree )\n end\n # AST Rewrite\n # elements: postfix\n # token labels: \n # rule labels: return_value\n # token list labels: \n # rule list labels: \n # wildcard labels: \n if @state.backtracking == 0\n\n return_value.tree = root_0\n stream_return_value = return_value ? 
subtree_stream( \"rule return_value\", return_value.tree ) : subtree_stream( \"token return_value\" )\n\n root_0 = @adaptor.create_flat_list\n # 645:24: -> postfix\n @adaptor.add_child( root_0, stream_postfix.next_tree )\n\n\n\n return_value.tree = root_0\n\n end\n end# - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 60 )\n\n end\n \n return return_value\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? || token.kind_of?(String) \n @pointer += 1\n token\n end", "def get_token\n column, line = @column, @line\n while true\n get_chars if @buf.empty? 
and not @eof\n return [:eof, nil, line, column] if @eof and @buf.empty?\n case @buf\n when /\\A\\(/\n eat(1)\n return [:open, nil, line, column]\n when /\\A\\)/\n eat(1)\n return [:close, nil, line, column]\n when /\\A\\[\\]=/\n eat(3)\n return [:expr, :\"[]=\", line, column]\n when /\\A\\[\\](.*)/m\n # Can be partial []=\n if ($1 == \"\") and not @eof\n get_chars\n redo\n end\n eat(2)\n return [:expr, :\"[]\", line, column]\n when /\\A\\[(.*)/m\n # Can be partial [] or []=\n if ($1 == \"\") and not @eof\n get_chars\n redo\n end\n eat(1)\n return [:sqopen, nil, line, column]\n when /\\A\\]/\n eat(1)\n return [:sqclose, nil, line, column]\n when /\\A\\'/\n eat(1)\n return [:quote, nil, line, column]\n when /\\A\\`/ # `\n eat(1)\n return [:quasiquote, nil, line, column]\n when /\\A\\,@/\n eat(2)\n return [:\"unquote-splicing\", nil, line, column]\n when /\\A\\,(.?)/m\n # Possible begin of ,@\n if $1 == \"\" and not @eof\n get_chars\n redo\n else\n eat(1)\n return [:unquote, nil, line, column]\n end\n when /\\A([ \\t\\r\\n]+)/\n eat($1.size)\n column, line = @column, @line\n redo\n when /\\A(#!.*\\n)/\n eat($1.size)\n column, line = @column, @line\n redo\n when /\\A(;.*\\n)/\n eat($1.size)\n column, line = @column, @line\n redo\n when /\\A;/m\n # Partial COMMENT\n if @eof\n return\n else\n get_chars\n redo\n end\n when /\\A#!/m\n # Partial SHEBANG\n if @eof\n return\n else\n get_chars\n redo\n end\n when /\\A#t/\n eat(2)\n return [:expr, :true, line, column]\n when /\\A#f/\n eat(2)\n return [:expr, :false, line, column]\n when /\\A#\\Z/m\n # Partial SHEBANG or #T or #F\n unless @eof\n get_chars\n redo\n end\n when /\\A([+\\-]?[0-9]+(?:(?:\\.[0-9]+)?[eE][+\\-]?[0-9]+|\\.[0-9]+))(.?)/m\n # Possible FLOAT\n # Partial FLOAT also matches, so continue if possible\n s, c = $1, $2\n if (c == \"\" or c =~ /\\A[eE]/) and not @eof\n get_chars\n redo\n else\n eat(s.size)\n return [:expr, eval(s), line, column]\n end\n when /\\A([+\\-]?(?:[1-9][0-9]*|0x[0-9a-fA-F]+|0b[01]+|0o[0-7]+|0[0-7]+|0))(.?)/m\n # Possible INT\n # Partial INT also matches, so continue if possible\n # Partial FLOAT also matches, so handle it\n s, c = $1, $2\n if (c == \"\" or c =~ /\\A[.eExbo]/) and not @eof\n get_chars\n redo\n else\n eat(s.size)\n return [:expr, eval(s), line, column]\n end\n when /\\A([a-zA-Z!$%&*+\\-.:<=>?@^_~][0-9a-zA-Z!$%&*+\\-.:<=>?@^_~]*)(.?)/m\n # Possible ID\n # Partial ID also matches, so continue if possible\n if $2 == \"\" and not @eof\n get_chars\n redo\n else\n eat($1.size)\n s = $1.to_sym\n stt = Hash.new{|ht,k| k}.merge({ :\"..\" => :dotdot, :\"...\" => :dotdotdot })\n return [:expr, stt[s], line, column]\n end\n when /\\A(\"(?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*\")/\n eat($1.size)\n return [:expr, eval($1), line, column]\n when /\\A((\"(?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*)#\\{)/\n eat($1.size)\n return [:istr_beg, eval($2+'\"'), line, column]\n when /\\A(\\}((?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*\"))/\n eat($1.size)\n return [:istr_end, eval('\"'+$2), line, column]\n when /\\A(\\}((?:[^\"#\\\\]|#*\\\\.|#+[^{\\\\#\"])*#*)#\\{)/\n eat($1.size)\n return [:istr_mid, eval('\"'+$2+'\"'), line, column]\n when /\\A\"/ # \"\n # Possible partial string/istr_beg\n if @eof\n raise \"EOF inside string: #{@buf}\"\n else\n get_chars\n redo\n end\n when /\\A\\}/ # \"\n # Possible partial istr_mid/istr_end\n if @eof\n raise \"EOF inside interpolated string: #{@buf}\"\n else\n get_chars\n redo\n end\n when /\\A(\\/(?:[^\\/\\\\]|\\\\.)*\\/[mix]*)(.?)/\n if $2 == \"\" and not @eof\n get_chars\n redo\n else\n 
eat($1.size)\n return [:expr, eval($1), line, column]\n end\n when /\\A\\//\n # Possible partial regexp\n if @eof\n raise \"EOF inside interpolated string: #{@buf}\"\n else\n get_chars\n redo\n end\n else\n raise \"Not sure what to do with: #{@buf}\"\n end\n end \n end", "def product_punctuation(str)\nend", "def peekChar\r\n\t\tcheckLine\r\n\t\tcheckSpace\r\n\t\treturn @Line[0]\r\n\tend", "def nextWhite()\r\n str = \"\"\r\n while /\\s/.match?(@c)\r\n str += @c\r\n nextCh()\r\n end\r\n \r\n return Token.new(Token::WHITESPACE, str)\r\n end", "def read_character\n lit = read_literal\n\n return \" \" if lit.empty? && peek_char == \" \"\n CHARACTERS.fetch(lit.downcase) do\n # Return just the first character\n unread(lit[1..-1])\n lit[0,1]\n end\n end", "def peek_char(lookahead = 1)\n @string[@current_char_pos + lookahead, 1]\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def forward_word\n #forward_regex(/[[:punct:][:space:]]\\w/)\n forward_regex(:word)\n end", "def test_speak_as_literal_punctuation\n style = \"#speak-as-lp-local,\n #speak-as-lp-inherit,\n #speak-as-lp-local-ignore,\n #speak-as-lp-inherit-ignore {\n speak-as: literal-punctuation;\n }\"\n html_parser = Hatemile::Util::Html::NokogiriLib::NokogiriHTMLDOMParser.new(\n \"<!DOCTYPE html>\n <html>\n <head>\n <title>HaTeMiLe Tests</title>\n <meta charset=\\\"UTF-8\\\" />\n </head>\n <body>\n <span id=\\\"speak-as-lp-local\\\">Speak this text.</span>\n <div id=\\\"speak-as-lp-inherit\\\">\n Speak <strong>this text.</strong>\n </div>\n <span id=\\\"speak-as-lp-local-ignore\\\" #{DATA_IGNORE}>\n Speak this text.\n </span>\n <div id=\\\"speak-as-lp-inherit-ignore\\\" #{DATA_IGNORE}>\n Speak <strong>this text.</strong>\n </div>\n </body>\n </html>\"\n )\n css_parser = Hatemile::Util::Css::Rcp::RCPParser.new(style)\n css = Hatemile::Implementation::AccessibleCSSImplementation.new(\n html_parser,\n css_parser,\n @configure\n )\n css.provide_all_speak_properties\n speak_as_lp_local = html_parser.find('#speak-as-lp-local').first_result\n speak_as_lp_inherit = html_parser.find('#speak-as-lp-inherit').first_result\n speak_as_lp_local_ignore = html_parser.find(\n '#speak-as-lp-local-ignore'\n ).first_result\n speak_as_lp_inherit_ignore = html_parser.find(\n '#speak-as-lp-inherit-ignore'\n ).first_result\n\n assert_equal(\n 'Speak this text Dot .',\n speak_as_lp_local.get_text_content\n )\n assert_equal(\n 'Speak this text Dot .',\n speak_as_lp_inherit.get_text_content.strip\n )\n assert_equal(\n 'Speak this text.',\n speak_as_lp_local_ignore.get_text_content.strip\n )\n assert_equal(\n 'Speak this text.',\n speak_as_lp_inherit_ignore.get_text_content.strip\n )\n end", "def operation_speak_as_no_punctuation(content, index, children)\n unless index.zero?\n children.push(\n create_content_element(content[0..(index - 1)], 'no-punctuation')\n )\n end\n children.push(\n create_visual_content_element(\n content[index..index],\n 'no-punctuation'\n )\n )\n\n children\n end" ]
[ "0.61056906", "0.59732217", "0.5820936", "0.5812468", "0.5586082", "0.548437", "0.5427205", "0.54215664", "0.5326504", "0.53190356", "0.53150266", "0.5258005", "0.5222149", "0.52096945", "0.51778907", "0.51275945", "0.5126917", "0.51174825", "0.51143944", "0.50990427", "0.5093429", "0.5057452", "0.50573266", "0.5055841", "0.503768", "0.5023056", "0.49938428", "0.49927765", "0.4989943", "0.49898788", "0.4980493", "0.4977466", "0.49536464", "0.49198708", "0.49152377", "0.4914911", "0.4914232", "0.48893052", "0.48802978", "0.4854661", "0.48480898", "0.48479626", "0.48446864", "0.48381826", "0.48287946", "0.48175544", "0.48164877", "0.4814967", "0.48093265", "0.48011425", "0.47880355", "0.47591975", "0.47590426", "0.47559747", "0.47389415", "0.4726782", "0.47055963", "0.46936613", "0.46922436", "0.46768597", "0.46675536", "0.46557397", "0.4652619", "0.46523395", "0.4650658", "0.46474686", "0.4643982", "0.4632714", "0.46295694", "0.46295694", "0.46290103", "0.46130413", "0.45836833", "0.45742485", "0.45703304", "0.4561272", "0.4561272", "0.45378375", "0.45294088", "0.4501799", "0.4488413", "0.4483677", "0.44753343", "0.44720337", "0.44680977", "0.4462715", "0.445759", "0.4452604", "0.44457912", "0.44421118", "0.4441695", "0.44328097", "0.4420128", "0.4418275", "0.44180185", "0.44159824", "0.44137207", "0.4410391", "0.44062424", "0.44000953" ]
0.7801193
0
Tests whether the next literal is a DivPunctuator. If it is, returns an ECMA262::PUNC_DIV or ECMA262::PUNC_DIVASSIGN object and advances the lexical parser position. Otherwise returns nil and the position is unchanged.
def div_punctuator
  if @codes[@pos] == 0x2f
    if @codes[@pos+1] == 0x3d
      @pos += 2
      return ECMA262::PUNC_DIVASSIGN
    else
      @pos += 1
      return ECMA262::PUNC_DIV
    end
  end
  nil
end
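A minimal, self-contained Ruby sketch of the behavior the query describes. TinyLexer and the stub constants below are hypothetical stand-ins (not the real minjs classes); they exist only so the three outcomes can be run end to end: "/=" consumes two code points, a bare "/" consumes one, and anything else leaves the position untouched.

module ECMA262
  PUNC_DIV       = :punc_div        # stand-in for the real punctuator singleton
  PUNC_DIVASSIGN = :punc_divassign  # stand-in for the real punctuator singleton
end

class TinyLexer
  attr_reader :pos

  def initialize(source)
    @codes = source.codepoints  # 0x2f is '/', 0x3d is '='
    @pos   = 0
  end

  def div_punctuator
    if @codes[@pos] == 0x2f
      if @codes[@pos + 1] == 0x3d
        @pos += 2                        # consume "/="
        return ECMA262::PUNC_DIVASSIGN
      else
        @pos += 1                        # consume "/"
        return ECMA262::PUNC_DIV
      end
    end
    nil                                  # not a DivPunctuator; @pos unchanged
  end
end

p TinyLexer.new("/= 2").div_punctuator  # => :punc_divassign (pos advances by 2)
p TinyLexer.new("/ 2").div_punctuator   # => :punc_div       (pos advances by 1)
p TinyLexer.new("+ 2").div_punctuator   # => nil             (pos stays at 0)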
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def punctuator\n code0 = @codes[@pos]\n code1 = @codes[@pos+1]\n code2 = @codes[@pos+2]\n code3 = @codes[@pos+3]\n if false\n elsif code0 == 0x28 # (\n @pos += 1 # (\n return ECMA262::PUNC_LPARENTHESIS\n elsif code0 == 0x29 # )\n @pos += 1 # )\n return ECMA262::PUNC_RPARENTHESIS\n elsif code0 == 0x7b # {\n @pos += 1 # {\n return ECMA262::PUNC_LCURLYBRAC\n elsif code0 == 0x7d # }\n @pos += 1 # }\n return ECMA262::PUNC_RCURLYBRAC\n elsif code0 == 0x3b # ;\n @pos += 1 # ;\n return ECMA262::PUNC_SEMICOLON\n elsif code0 == 0x3d # =\n if code1 == 0x3d and code2 == 0x3d # ===\n @pos += 3\n return ECMA262::PUNC_SEQ\n end\n if code1 == 0x3d # ==\n @pos += 2\n return ECMA262::PUNC_EQ\n end\n @pos += 1 # =\n return ECMA262::PUNC_ASSIGN\n elsif code0 == 0x21 # !\n if code1 == 0x3d and code2 == 0x3d # !==\n @pos += 3\n return ECMA262::PUNC_SNEQ\n end\n if code1 == 0x3d # !=\n @pos += 2\n return ECMA262::PUNC_NEQ\n end\n @pos += 1 # !\n return ECMA262::PUNC_LNOT\n elsif code0 == 0x25 # %\n if code1 == 0x3d # %=\n @pos += 2\n return ECMA262::PUNC_MODASSIGN\n end\n @pos += 1 # %\n return ECMA262::PUNC_MOD\n elsif code0 == 0x26 # &\n if code1 == 0x3d # &=\n @pos += 2\n return ECMA262::PUNC_ANDASSIGN\n end\n if code1 == 0x26 # &&\n @pos += 2\n return ECMA262::PUNC_LAND\n end\n @pos += 1 # &\n return ECMA262::PUNC_AND\n elsif code0 == 0x2a # *\n if code1 == 0x3d # *=\n @pos += 2\n return ECMA262::PUNC_MULASSIGN\n end\n @pos += 1 # *\n return ECMA262::PUNC_MUL\n elsif code0 == 0x2b # +\n if code1 == 0x3d # +=\n @pos += 2\n return ECMA262::PUNC_ADDASSIGN\n end\n if code1 == 0x2b # ++\n @pos += 2\n return ECMA262::PUNC_INC\n end\n @pos += 1 # +\n return ECMA262::PUNC_ADD\n elsif code0 == 0x2c # ,\n @pos += 1 # ,\n return ECMA262::PUNC_COMMA\n elsif code0 == 0x2d # -\n if code1 == 0x3d # -=\n @pos += 2\n return ECMA262::PUNC_SUBASSIGN\n end\n if code1 == 0x2d # --\n @pos += 2\n return ECMA262::PUNC_DEC\n end\n @pos += 1 # -\n return ECMA262::PUNC_SUB\n elsif code0 == 0x2e # .\n @pos += 1 # .\n return ECMA262::PUNC_PERIOD\n elsif code0 == 0x3a # :\n @pos += 1 # :\n return ECMA262::PUNC_COLON\n elsif code0 == 0x3c # <\n if code1 == 0x3d # <=\n @pos += 2\n return ECMA262::PUNC_LTEQ\n end\n if code1 == 0x3c and code2 == 0x3d # <<=\n @pos += 3\n return ECMA262::PUNC_LSHIFTASSIGN\n end\n if code1 == 0x3c # <<\n @pos += 2\n return ECMA262::PUNC_LSHIFT\n end\n @pos += 1 # <\n return ECMA262::PUNC_LT\n elsif code0 == 0x3e # >\n if code1 == 0x3e and code2 == 0x3e and code3 == 0x3d # >>>=\n @pos += 4\n return ECMA262::PUNC_URSHIFTASSIGN\n end\n if code1 == 0x3e and code2 == 0x3e # >>>\n @pos += 3\n return ECMA262::PUNC_URSHIFT\n end\n if code1 == 0x3e and code2 == 0x3d # >>=\n @pos += 3\n return ECMA262::PUNC_RSHIFTASSIGN\n end\n if code1 == 0x3e # >>\n @pos += 2\n return ECMA262::PUNC_RSHIFT\n end\n if code1 == 0x3d # >=\n @pos += 2\n return ECMA262::PUNC_GTEQ\n end\n @pos += 1 # >\n return ECMA262::PUNC_GT\n elsif code0 == 0x3f # ?\n @pos += 1 # ?\n return ECMA262::PUNC_CONDIF\n elsif code0 == 0x5b # [\n @pos += 1 # [\n return ECMA262::PUNC_LSQBRAC\n elsif code0 == 0x5d # ]\n @pos += 1 # ]\n return ECMA262::PUNC_RSQBRAC\n elsif code0 == 0x5e # ^\n if code1 == 0x3d # ^=\n @pos += 2\n return ECMA262::PUNC_XORASSIGN\n end\n @pos += 1 # ^\n return ECMA262::PUNC_XOR\n elsif code0 == 0x7c # |\n if code1 == 0x7c # ||\n @pos += 2\n return ECMA262::PUNC_LOR\n end\n if code1 == 0x3d # |=\n @pos += 2\n return ECMA262::PUNC_ORASSIGN\n end\n @pos += 1 # |\n return ECMA262::PUNC_OR\n elsif code0 == 0x7e # ~\n @pos += 1 # ~\n return 
ECMA262::PUNC_NOT\n end\n nil\n end", "def div!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = DIV\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 336:6: 'div'\n match( \"div\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def div(p0) end", "def get_div( regex)\n get_shortcut(regex,'div')\n end", "def div!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 41 )\n\n\n\n type = DIV\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 62:6: '/'\n match( 0x2f )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 41 )\n\n\n end", "def div!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 51 )\n\n type = DIV\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 377:7: '/'\n match( 0x2f )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 51 )\n\n end", "def on_div(ast_node, context)\n left, right = *ast_node\n\n return on_call_number(context, left) / on_call_number(context, right)\n end", "def next_node_not_div_or_nil?(node)\n next_node_name(node) != 'div' && next_node_name(node) != nil\n end", "def /(op)\n CAS::Div.new self, op\n end", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def factor\n if @tokens[@i].type == :lparn\n @i += 1 # lparn\n ret = expr\n @i += 1 # rparn\n elsif @tokens[@i].type == :iden\n ret = iden\n else\n ret = num\n end\n ret\n end", "def decimal_literal\n pos0 = @pos\n code = @codes[@pos]\n\n if code.nil?\n return nil\n elsif code == 0x2e #.\n @pos += 1\n f = decimal_digits\n if f.nil? 
#=> this period is punctuator\n @pos = pos0 + 1\n return ECMA262::PUNC_PERIOD\n end\n if (code = @codes[@pos]) == 0x65 || code == 0x45\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new('0', f, e)\n elsif code == 0x30 # zero\n i = \"0\"\n @pos += 1\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n elsif code >= 0x31 and code <= 0x39\n i = decimal_digits\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n end\n\n nil\n end", "def divmod(p0) end", "def divide_element(el, divider)\n if Object.const_defined?('CFunc')\n if el.is_a?(CFunc::UInt64)\n el.divide(divider)\n else\n el.value\n end\n else\n el / divider\n end\n end", "def /(obj)\n return nil unless (self.top) || (self == obj) || (self =~ obj) || \n (obj == 0) || \n (obj.is_a? Number) ||\n (obj == P_Infinity) ||\n (obj == M_Infinity)\n if self =~ obj\n ret = self.left\n ret.top = self.top\n return ret\n end \n if (obj.is_a? 
Number) || (obj == P_Infinity) || (obj == M_Infinity)\n lft = self.left\n lft.top = true\n lft /= obj\n puts lft\n return Prod.new(lft,self.right).reduce unless lft == nil\n return nil unless self.top\n end\n return Div.new(self,obj).reduce\n end", "def Primario\n if @token.get_tipo == \"num\" then\n t, msg = le(\"num\")\n if t\n return Hash[\"tag\" => \"num\", \"1\" => t.get_lexama.to_f]\n else\n return nil, msg\n end\n elsif @token.get_tipo == \"(\" then\n t, msg = le(\"(\")\n if t then\n exp1, msg = self.Exp\n if exp1 then\n t, msg = le(\")\")\n if t then\n return exp1\n else\n return nil, msg \n end\n else\n return nil, msg \n end\n else\n return nil, msg \n end \n end\n\n return nil, \"Token inesperado, esperava num ou (, encontrou #{@token.get_tipo}\"\n end", "def secure_div(divident, divisor)\n return nil if divisor == 0\n divident.to_f/divisor\n end", "def divide\n match '/'\n factor\n emit_ln 'MOVE (SP)+,D1'\n emit_ln 'DIVS D1,D0'\nend", "def isPropDiv(base, div)\n\treturn base%div == 0\nend", "def rdiv(p0) end", "def operation_speak_as_literal_punctuation(content, index, children)\n data_property_value = 'literal-punctuation'\n unless index.zero?\n children.push(\n create_content_element(content[0..(index - 1)], data_property_value)\n )\n end\n children.push(\n create_aural_content_element(\n \" #{get_description_of_symbol(content[index..index])} \",\n data_property_value\n )\n )\n\n children.push(\n create_visual_content_element(\n content[index..index],\n data_property_value\n )\n )\n\n children\n end", "def divmod(divisor)\n \n if (@ca.getValidCharacters.include? divisor)\n new_coefs = @coefs.map do |a|\n quotient = @ca.binaryDivide(a, divisor)\n end\n q, r = MyPolynomial[new_coefs], MyPolynomial[' ']\n elsif divisor.is_a? MyPolynomial\n a = self; b = divisor; q = ' '; r = self\n (a.degree - b.degree + 1).times do\n dd = r.degree - b.degree\n qqa = @ca.binaryDivide(r.coefs[-1], b.coefs[-1])\n qq = MyPolynomial[dd => qqa]\n q = qq.+(q)\n r = r + (qq * divisor)\n break if r.zero?\n end\n else\n raise ArgumentError, 'divisor should be a valid character or polynomial'\n end\n [q, r]\n end", "def parse_unit\n if @s.scan(/(?=\\()/)\n parse_group\n elsif @s.scan(/(?=\\[|\\\\[dDwWhHsS]|\\.)/)\n parse_char_group\n elsif c = parse_single_char\n Char.new c\n end\n end", "def token\n identifier_name || numeric_literal || punctuator || string_literal\n end", "def speak_as_literal_punctuation(element)\n speak_as(\n element,\n get_regular_expression_of_symbols,\n 'literal-punctuation',\n method(:operation_speak_as_literal_punctuation)\n )\n end", "def factor\n if number?\n number\n else\n match(:T_LPAR)\n expr = expression\n match(:T_RPAR)\n expr\n end\n end", "def div(y)\n quo(y, ZERO)\n end", "def token_relation( numerator, divider )\n return numerator / divider\n end", "def div(arg0)\n end", "def spot_op_cdecl\n nd_lhs, op, _nd_rhs = @node.children\n *nd_parent_lhs, _const = nd_lhs.children\n if @name == op\n @snippet = @fetch[nd_lhs.last_lineno]\n if @snippet.match(/\\G\\s*(#{ Regexp.quote(op) })=/, nd_lhs.last_column)\n @beg_column = $~.begin(1)\n @end_column = $~.end(1)\n end\n else\n # constant access error\n @end_column = nd_lhs.last_column\n if nd_parent_lhs.empty? 
# example: ::C += 1\n if nd_lhs.first_lineno == nd_lhs.last_lineno\n @snippet = @fetch[nd_lhs.last_lineno]\n @beg_column = nd_lhs.first_column\n end\n else # example: Foo::Bar::C += 1\n if nd_parent_lhs.last.last_lineno == nd_lhs.last_lineno\n @snippet = @fetch[nd_lhs.last_lineno]\n @beg_column = nd_parent_lhs.last.last_column\n end\n end\n end\n end", "def isPunctChar(ch)\n return UNICODE_PUNCT_RE =~ ch\n end", "def _ParagraphDelimiter\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_BlockDelimiter)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_PreformattedCommandHead)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_LineBlock)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Newpage)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_HeadedStart)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_ParagraphDelimiter unless _tmp\n return _tmp\n end", "def factor\n case\n when scan(/\\(/) then expr.tap { scan(/\\)/) }\n else number\n end\n end", "def divide(other)\n Rubinius.primitive :float_div\n redo_coerced :/, other\n end", "def parse_operand\n case operand_literal[0]\n when '%'\n base = 2\n offset = 1\n when '$'\n base = 16\n offset = 1\n else\n base = 10\n offset = 0\n end\n\n operand_literal[offset..].to_i(base)\n end", "def DISABLED_test_divide_token\n assert_tokenises_as '/', DivideOpToken.instance\n assert_tokenises_as ' /', DivideOpToken.instance\n assert_tokenises_as ' / ', DivideOpToken.instance\n end", "def _HtmlBlockDiv\n\n _save = self.pos\n while true # sequence\n _tmp = apply(:_HtmlBlockOpenDiv)\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # choice\n _tmp = apply(:_HtmlBlockDiv)\n break if _tmp\n self.pos = _save2\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_HtmlBlockCloseDiv)\n _tmp = _tmp ? nil : true\n self.pos = _save4\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break if _tmp\n self.pos = _save2\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_HtmlBlockCloseDiv)\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_HtmlBlockDiv unless _tmp\n return _tmp\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def factor\n space\n\n if accept(\"-\")\n AST.not(expr)\n elsif accept(\"~\")\n AST.opt(expr)\n else\n expr\n end\n end", "def div(x, op, y)\n x.send(op, y)\nend", "def /(divisor)\n if divisor.kind_of? Measures::Measure\n value = self.value / divisor.value\n unit = \"#{self.unit}/#{divisor.unit}\"\n modifier = \"base\"\n quantity = \"#{self.quantity}/#{divisor.quantity}\"\n result = ComplexMeasure.new(value,modifier,unit,quantity)\n elsif divisor.is_a? 
Numeric\n value = self.value / divisor\n unit = self.unit\n modifier = self.modifier\n quantity = self.quantity\n result = self.class.new(value,modifier,unit,quantity)\n else\n raise ArgumentError, \"divisor must be either a Numeric or a Measure of some sort (BaseMeasure or ComplexMeasure)\"\n end\n return result\n end", "def reduce_mul_operator_1(_production, _range, _tokens, theChildren)\n return CalcDivideNode.new(theChildren[0].symbol)\n end", "def try_declaration\n # This allows the \"*prop: val\", \":prop: val\", \"#prop: val\", and \".prop:\n # val\" hacks.\n name_start_pos = source_position\n if (s = tok(/[:\\*\\.]|\\#(?!\\{)/))\n name = [s, str {ss}]\n return name unless (ident = interp_ident)\n name << ident\n else\n return unless (name = interp_ident)\n name = Array(name)\n end\n\n if (comment = tok(COMMENT))\n name << comment\n end\n name_end_pos = source_position\n\n mid = [str {ss}]\n return name + mid unless tok(/:/)\n mid << ':'\n\n # If this is a CSS variable, parse it as a property no matter what.\n if name.first.is_a?(String) && name.first.start_with?(\"--\")\n return css_variable_declaration(name, name_start_pos, name_end_pos)\n end\n\n return name + mid + [':'] if tok(/:/)\n mid << str {ss}\n post_colon_whitespace = !mid.last.empty?\n could_be_selector = !post_colon_whitespace && (tok?(IDENT_START) || tok?(INTERP_START))\n\n value_start_pos = source_position\n value = nil\n error = catch_error do\n value = value!\n if tok?(/\\{/)\n # Properties that are ambiguous with selectors can't have additional\n # properties nested beneath them.\n tok!(/;/) if could_be_selector\n elsif !tok?(/[;{}]/)\n # We want an exception if there's no valid end-of-property character\n # exists, but we don't want to consume it if it does.\n tok!(/[;{}]/)\n end\n end\n\n if error\n rethrow error unless could_be_selector\n\n # If the value would be followed by a semicolon, it's definitely\n # supposed to be a property, not a selector.\n additional_selector = almost_any_value\n rethrow error if tok?(/;/)\n\n return name + mid + (additional_selector || [])\n end\n\n value_end_pos = source_position\n ss\n require_block = tok?(/\\{/)\n\n node = node(Sass::Tree::PropNode.new(name.flatten.compact, [value], :new),\n name_start_pos, value_end_pos)\n node.name_source_range = range(name_start_pos, name_end_pos)\n node.value_source_range = range(value_start_pos, value_end_pos)\n\n return node unless require_block\n nested_properties! node\n end", "def is_frac(latex, step)\n\tlatex[step+1..step+4].join == \"frac\"\nend", "def consume_numeric\n number = consume_number\n\n if start_identifier?\n create_token(:dimension,\n :repr => number[0],\n :type => number[2],\n :unit => consume_name,\n :value => number[1])\n\n elsif @s.peek == '%'\n @s.consume\n\n create_token(:percentage,\n :repr => number[0],\n :type => number[2],\n :value => number[1])\n\n else\n create_token(:number,\n :repr => number[0],\n :type => number[2],\n :value => number[1])\n end\n end", "def tokenize_float_literal\n advance # Pass the .\n\n until( /[0-9eE]/.match( cchar ).nil? 
)\n if cchar == 'e' || cchar == 'E'\n return tokenize_exponent_literal\n end\n advance\n end\n capture_token( :float_literal )\n end", "def divmod(val); end", "def split_div(str)\n str =~ /(\\d+)\\/(\\d+)/\n if $1 and $2\n result = $1.to_f / $2.to_f\n result.to_s.gsub(/\\.(\\d)\\d*/) { |s| \".\"+$1 }\n else\n str\n end\n end", "def /(expr2)\n Operator.new(S_DIV, self, expr2)\n end", "def speak_as_literal_punctuation_inherit(element)\n reverse_speak_as(element, 'literal-punctuation')\n reverse_speak_as(element, 'no-punctuation')\n\n isolate_text_node(element)\n\n visit(element, method(:speak_as_literal_punctuation))\n end", "def div(x, y)\n x / y\nend", "def tokenize_operator(&block) # :yields: SQLTree::Token\n operator = current_char\n if operator == '-' && /[\\d\\.]/ =~ peek_char\n tokenize_number(&block)\n else\n operator << next_char if SQLTree::Token::OPERATORS_HASH.has_key?(operator + peek_char)\n operator_class = SQLTree::Token.const_get(SQLTree::Token::OPERATORS_HASH[operator].to_s.upcase)\n handle_token(operator_class.new(operator), &block)\n end\n end", "def white_space\n if white_space?(@codes[@pos])\n begin\n @pos += 1\n end until !white_space?(@codes[@pos])\n return ECMA262::WhiteSpace.get\n else\n nil\n end\n end", "def test_bracket01\n a1 = [\"a\", \"\\n\\n\\n\", \"b\", \"\\n\\n\\n\", \"c\", \"\\n\\n\"]\n pt1 = Pt.new(a1)\n\n assert_equal pt1.to_a[0], pt1[0]\n assert_equal Pt::Paragraph, pt1[0].class\n assert_equal a1[0], pt1[0]\n assert_equal Pt::Paragraph.new(a1[0]), pt1[0]\n\n # negative or too-big out-of-bound begin\n assert_nil pt1[-99]\n assert_nil pt1[98]\n\n assert_equal pt1.class, pt1[0, 6].class\n assert_equal a1, pt1[0, 6].to_a\n assert_equal a1[0, 6], pt1[0, 6].to_a\n # oper = (IS_VER_2 ? :!= : :==) # Because PlainText::Part#== is redefined and pt1 is Part in Ruby 2, the following is unequal, whereas pt1 is Array in Ruby 3!\n oper = :!= # In Ver.0.8, it is redefined as unequal.\n assert_operator pt1[0, 6], oper, a1\n assert_operator a1, oper, pt1[0, 6]\n\n assert_equal a1[0, 2], pt1[0, 2].to_a\n assert_equal a1, pt1[0, 98].to_a\n assert_equal a1[0, 99], pt1[0, 98].to_a\n\n assert_equal pt1.class, pt1[0..1].class\n assert_equal a1[0..1], pt1[0..1].to_a\n assert_equal a1[0, 2], pt1[0..1].to_a\n assert_equal a1[0..5], pt1[0..5].to_a\n assert_equal a1, pt1[0..5].to_a\n assert_equal a1[0..99], pt1[0..99].to_a\n assert_equal a1, pt1[0..99].to_a\n assert_equal a1, pt1[0..-1].to_a\n assert_equal a1[-6..-1],pt1[-6..-1].to_a\n assert_equal a1, pt1[-6..-1].to_a\n assert_equal a1[-6..3], pt1[-6..3].to_a\n assert_equal a1[-6...4],pt1[-6...4].to_a\n\n assert_equal pt1[0..-1], pt1[0..99]\n assert_equal pt1[0, 6], pt1[0..-1]\n assert_equal pt1, pt1[0..99]\n\n pt2 = pt1[0, 4]\n assert_equal pt1.class, pt2.class\n assert_equal pt1.paras[0, 2], pt2.paras\n assert_equal pt1.boundaries[0, 2], pt2.boundaries\n\n # negative or too-big out-of-bound begin\n assert_nil a1[-99..2]\n assert_nil pt1[-99..2]\n assert_nil pt1[-99..-1]\n assert_nil pt1[98..99]\n\n # other out-of-bounds: Empty\n assert_equal a1[-2..2], pt1[-2..2].to_a\n assert_equal a1[-2...3], pt1[-2...3].to_a\n\n\n # Exception (Error)\n assert_raises(TypeError){ pt1['abc'] }\n assert_raises(TypeError){ a1[(?a)..(?c)] }\n assert_raises(TypeError){ pt1[(?a)..(?c)] }\n assert_raises(ArgumentError){ pt1[0, 1] }\n assert_raises(ArgumentError){ pt1[1, 2] }\n\n # Special cases, where the first index (or begin) is equal to size (as described in the reference) \n # @see https://docs.ruby-lang.org/ja/latest/class/Array.html#I_--5B--5D\n 
assert_nil pt1[pt1.size]\n assert_nil pt1[pt1.size, -2]\n assert_raises(TypeError){ pt1[pt1.size, ?a] }\n assert_equal Pt.new([]), pt1[pt1.size, 2]\n assert_equal Pt.new([]), pt1[pt1.size, 98]\n assert_equal Pt.new([]), pt1[pt1.size..99]\n assert_equal Pt.new([]), pt1[pt1.size..1]\n end", "def division(value) \n\t\tdiv = Fraccion.new(0, 0)\n\t\taux = value.numer\n\t\tvalue.numer = value.denom\n\t\tvalue.denom = aux\n\t\tdiv.numer = @numer * value.numer\n\t\tdiv.denom = @denom * value.denom \n \treturn div \n\tend", "def factor()\n node = nil\n\n if accept(:operator_not)\n node = Nodes::Not.new()\n node.child = factor()\n elsif accept(:paren_open)\n node = expression()\n expect(:paren_close)\n else\n node = subexpression()\n end\n\n node\n end", "def token!\r\n # at line 1:8: ( T__6 | NUMBER | SPACE )\r\n alt_3 = 3\r\n case look_3 = @input.peek( 1 )\r\n when 0x2b then alt_3 = 1\r\n when 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39 then alt_3 = 2\r\n when 0x20 then alt_3 = 3\r\n else\r\n raise NoViableAlternative( \"\", 3, 0 )\r\n end\r\n case alt_3\r\n when 1\r\n # at line 1:10: T__6\r\n t__6!\r\n\r\n when 2\r\n # at line 1:15: NUMBER\r\n number!\r\n\r\n when 3\r\n # at line 1:22: SPACE\r\n space!\r\n\r\n end\r\n end", "def div!(*args)\n dotop!(RAtlas::method(:div!), RAtlas::method(:mdiv!),\n *args)\n end", "def _cont(p)\n\n _save = self.pos\n while true # choice\n _tmp = apply_with_args(:_scont, p)\n break if _tmp\n self.pos = _save\n _save1 = self.pos\n _tmp = match_string(\"(\")\n _tmp = _tmp ? nil : true\n self.pos = _save1\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_cont unless _tmp\n return _tmp\n end", "def /(p0) end", "def parse_char_class\n pos = @s.pos\n if char_group = parse_char_group\n return char_group\n end\n @s.pos = pos\n\n c = parse_char_group_char\n if !c\n return\n end\n if @s.scan(/-/)\n CharRange.new c, expect(:parse_char_group_char)\n else\n CharRange.new c, c\n end\n end", "def process_selector(selector)\n # Parse the first piece as a selector, defaulting to DIV tag if none is specified\n if selector.length > 0 and ['#', '.'].count(selector[0]) > 0\n @tag = 'div'\n else\n delimiter_index = nil\n i = 0\n for char in selector.split(\"\") do\n if ['#', '.'].count(char) > 0\n delimiter_index = i\n break\n end\n i += 1\n end\n \n if delimiter_index == nil\n @tag = selector * 1\n selector = \"\"\n else\n @tag = selector[0..delimiter_index-1]\n selector = selector[@tag.length..-1]\n end\n end\n \n @tag_id = nil\n @tag_classes = []\n while true do\n next_delimiter_index = nil\n if selector == \"\"\n break\n \n else\n i = 0\n for char in selector.split(\"\") do\n if i > 0 and ['#', '.'].count(char) > 0\n next_delimiter_index = i\n break\n end\n i += 1\n end\n \n if next_delimiter_index == nil\n if selector[0] == '#'\n @tag_id = selector[1..-1]\n elsif selector[0] == \".\"\n @tag_classes << selector[1..-1]\n end\n \n selector = \"\"\n \n else\n if selector[0] == '#'\n @tag_id = selector[1..next_delimiter_index-1]\n elsif selector[0] == \".\"\n @tag_classes << selector[1..next_delimiter_index-1]\n end\n \n selector = selector[next_delimiter_index..-1]\n end\n end\n end\n \n self\n end", "def DISABLED_test_multiplication_addition_and_division\n assert_parses_to [DecimalToken.new(0),\n MultiplyOpToken.instance,\n IntegerToken.new(2),\n AddOpToken.instance,\n IntegerToken.new(3),\n DivideOpToken.instance,\n IntegerToken.new(4)],\n ExpressionNode.new(\n TermNode.new(\n FactorNode.new(\n BaseNode.new(\n DecimalToken.new(0))),\n 
TermPrimeNode.new(\n MultiplyOpToken.instance,\n FactorNode.new(\n BaseNode.new(\n IntegerToken.new(2))),\n TermPrimeNode.new)),\n ExpressionPrimeNode.new(\n AddOpToken.instance,\n TermNode.new(\n FactorNode.new(\n BaseNode.new(\n IntegerToken.new(3))),\n TermPrimeNode.new(\n DivideOpToken.instance,\n FactorNode.new(\n BaseNode.new(\n IntegerToken.new(4))),\n TermPrimeNode.new)),\n ExpressionPrimeNode.new))\n end", "def test_speak_as_literal_punctuation\n style = \"#speak-as-lp-local,\n #speak-as-lp-inherit,\n #speak-as-lp-local-ignore,\n #speak-as-lp-inherit-ignore {\n speak-as: literal-punctuation;\n }\"\n html_parser = Hatemile::Util::Html::NokogiriLib::NokogiriHTMLDOMParser.new(\n \"<!DOCTYPE html>\n <html>\n <head>\n <title>HaTeMiLe Tests</title>\n <meta charset=\\\"UTF-8\\\" />\n </head>\n <body>\n <span id=\\\"speak-as-lp-local\\\">Speak this text.</span>\n <div id=\\\"speak-as-lp-inherit\\\">\n Speak <strong>this text.</strong>\n </div>\n <span id=\\\"speak-as-lp-local-ignore\\\" #{DATA_IGNORE}>\n Speak this text.\n </span>\n <div id=\\\"speak-as-lp-inherit-ignore\\\" #{DATA_IGNORE}>\n Speak <strong>this text.</strong>\n </div>\n </body>\n </html>\"\n )\n css_parser = Hatemile::Util::Css::Rcp::RCPParser.new(style)\n css = Hatemile::Implementation::AccessibleCSSImplementation.new(\n html_parser,\n css_parser,\n @configure\n )\n css.provide_all_speak_properties\n speak_as_lp_local = html_parser.find('#speak-as-lp-local').first_result\n speak_as_lp_inherit = html_parser.find('#speak-as-lp-inherit').first_result\n speak_as_lp_local_ignore = html_parser.find(\n '#speak-as-lp-local-ignore'\n ).first_result\n speak_as_lp_inherit_ignore = html_parser.find(\n '#speak-as-lp-inherit-ignore'\n ).first_result\n\n assert_equal(\n 'Speak this text Dot .',\n speak_as_lp_local.get_text_content\n )\n assert_equal(\n 'Speak this text Dot .',\n speak_as_lp_inherit.get_text_content.strip\n )\n assert_equal(\n 'Speak this text.',\n speak_as_lp_local_ignore.get_text_content.strip\n )\n assert_equal(\n 'Speak this text.',\n speak_as_lp_inherit_ignore.get_text_content.strip\n )\n end", "def number\n result = ''\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n\n if @current_char == '.'\n result << @current_char\n advance\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n Token.new(:real_const, result.to_f)\n else\n Token.new(:integer_const, result.to_i)\n end\n end", "def test_operators_that_dont_belong_together\n tok = initialize_tokenizer( <<-EOD\n +*\n EOD\n )\n\n assert_equal('+', tok.next.token.value) \n assert_equal('*', tok.next.token.value) \n end", "def _eof_comment\n\n _save = self.pos\n begin # sequence\n _tmp = match_string(\"#\")\n break unless _tmp\n while true # kleene\n\n _save1 = self.pos\n begin # sequence\n _save2 = self.pos\n _tmp = apply(:_eof)\n _tmp = !_tmp\n self.pos = _save2\n break unless _tmp\n _tmp = match_dot\n end while false\n unless _tmp\n self.pos = _save1\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true # end kleene\n end while false\n unless _tmp\n self.pos = _save\n end # end sequence\n\n set_failed_rule :_eof_comment unless _tmp\n return _tmp\n end", "def division\n return @division\n end", "def pre_divide; end", "def puncture\n end", "def divu(s, t)\n div(s, t)\n end", "def consume_numeric\n number = consume_number\n repr = number[0]\n value = number[1]\n type = number[2]\n\n if type == :integer\n value = value.to_i\n else\n value = value.to_f\n end\n\n if 
start_identifier?(@s.peek(3))\n create_token(:dimension,\n :repr => repr,\n :type => type,\n :unit => consume_name,\n :value => value)\n\n elsif @s.peek == '%'\n @s.consume\n\n create_token(:percentage,\n :repr => repr,\n :type => type,\n :value => value)\n\n else\n create_token(:number,\n :repr => repr,\n :type => type,\n :value => value)\n end\n end", "def div(other)\n divmod(other).first\n end", "def _Comment\n\n _save = self.pos\n while true # sequence\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = match_string(\"//\")\n unless _tmp\n self.pos = _save\n break\n end\n while true\n\n _save2 = self.pos\n while true # sequence\n _save3 = self.pos\n _tmp = apply(:_Nl)\n _tmp = _tmp ? nil : true\n self.pos = _save3\n unless _tmp\n self.pos = _save2\n break\n end\n _tmp = get_byte\n unless _tmp\n self.pos = _save2\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:_Nl)\n unless _tmp\n self.pos = _save\n break\n end\n while true\n _tmp = apply(:_EmptyLine)\n break unless _tmp\n end\n _tmp = true\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_Comment unless _tmp\n return _tmp\n end", "def _sp\n while true\n\n _save1 = self.pos\n while true # choice\n _tmp = match_string(\" \")\n break if _tmp\n self.pos = _save1\n _tmp = match_string(\"\\t\")\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_comment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n break unless _tmp\n end\n _tmp = true\n set_failed_rule :_sp unless _tmp\n return _tmp\n end", "def consume\n return nil if @s.eos?\n\n @s.mark\n\n # Consume comments.\n if comment_token = consume_comments\n if @options[:preserve_comments]\n return comment_token\n else\n return consume\n end\n end\n\n # Consume whitespace.\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?(@s.peek(2))\n create_token(:hash,\n :type => start_identifier?(@s.peek(3)) ? 
:id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n # Non-standard: Preserve the IE * hack.\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n nextTwoChars = @s.peek(2)\n nextThreeChars = char + nextTwoChars\n\n if start_number?(nextThreeChars)\n @s.reconsume\n consume_numeric\n elsif nextTwoChars == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n elsif start_identifier?(nextThreeChars)\n @s.reconsume\n consume_ident\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?(@s.peek(3))\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?\n @s.reconsume\n consume_ident\n else\n # Parse error.\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def divDeComplejos(c)\n\t\tif c.class != NumCom && c.class != Float && c.class != Fixnum && c.class != Bignum\n\t\t\treturn \"El numero rescibido no es valido\"\n\t\t\texit\n\t\telse\n\t\t\ta2 = (@r * c.getReal + @i * c.getImaginario)/(c.getReal * c.getReal + c.getImaginario * c.getImaginario)\n\t\t\tb2 = (@i * c.getReal - @r * c.getImaginario)/(c.getReal * c.getReal + c.getImaginario * c.getImaginario)\n\t\tend\n\t\treturn NumCom.new(a2,b2)\n\tend", "def division=(div)\n @division = div if div.class == Division\n @division ||= Division.new(div) if div.respond_to?(:each_pair)\n @division ||= Rarity::default_division\n end", "def parse_unary_operation cur_tok\n\n\t\t# Check if UnaryOperation or a Term.\n\t\tif cur_tok and cur_tok.type == \"Operator\" and cur_tok.value == \"`\" and @tokens.peak and @tokens.peak.type == \"Identifier\"\n\t\t\tif @tokens.peak.value == 
\"i\"\n\t\t\t\[email protected]\n\t\t\t\tcur_ast = Term.new(cur_tok.line, cur_tok.col, imaginary: true) # No magnitude as default = 1\n\t\t\telse\n\t\t\t\tcur_ast = Term.new(cur_tok.line, cur_tok.col, literal_variable: @tokens.next.value) # No magnitude as default = 1\n\t\t\tend\n\t\telse\n\t\t\tcur_ast = UnaryOperation.new(cur_tok.line, cur_tok.col, parse_single_token(cur_tok), parse_next(true))\n\t\tend\n\n\t\t# Use look ahead to see if the UnaryOperation needs to be a child node of a different ast.\n\t\t# This is the case when is_operation is true.\n\t\tif is_operation \n\t\t\treturn parse_operation(cur_ast, true)\n\t\telse\n\t\t\treturn cur_ast \n\t\tend\n\tend", "def consume\n return nil if @s.eos?\n\n @s.mark\n return create_token(:whitespace) if @s.scan(RE_WHITESPACE)\n\n char = @s.consume\n\n case char.to_sym\n when :'\"'\n consume_string('\"')\n\n when :'#'\n if @s.peek =~ RE_NAME || valid_escape?\n create_token(:hash,\n :type => start_identifier? ? :id : :unrestricted,\n :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'$'\n if @s.peek == '='\n @s.consume\n create_token(:suffix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :\"'\"\n consume_string(\"'\")\n\n when :'('\n create_token(:'(')\n\n when :')'\n create_token(:')')\n\n when :*\n if @s.peek == '='\n @s.consume\n create_token(:substring_match)\n\n elsif @options[:preserve_hacks] && @s.peek =~ RE_NAME_START\n # NON-STANDARD: IE * hack\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n\n when :+\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :','\n create_token(:comma)\n\n when :-\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n elsif start_identifier?(char + @s.peek(2))\n @s.reconsume\n consume_ident\n elsif @s.peek(2) == '->'\n @s.consume\n @s.consume\n create_token(:cdc)\n else\n create_token(:delim, :value => char)\n end\n\n when :'.'\n if start_number?(char + @s.peek(2))\n @s.reconsume\n consume_numeric\n else\n create_token(:delim, :value => char)\n end\n\n when :/\n if @s.peek == '*'\n @s.consume\n\n if text = @s.scan_until(RE_COMMENT_CLOSE)\n text.slice!(-2, 2)\n else\n text = @s.consume_rest\n end\n\n if @options[:preserve_comments]\n create_token(:comment, :value => text)\n else\n consume\n end\n else\n create_token(:delim, :value => char)\n end\n\n when :':'\n create_token(:colon)\n\n when :';'\n create_token(:semicolon)\n\n when :<\n if @s.peek(3) == '!--'\n @s.consume\n @s.consume\n @s.consume\n\n create_token(:cdo)\n else\n create_token(:delim, :value => char)\n end\n\n when :'@'\n if start_identifier?\n create_token(:at_keyword, :value => consume_name)\n else\n create_token(:delim, :value => char)\n end\n\n when :'['\n create_token(:'[')\n\n when :'\\\\'\n if valid_escape?(char + @s.peek)\n @s.reconsume\n consume_ident\n else\n create_token(:delim,\n :error => true,\n :value => char)\n end\n\n when :']'\n create_token(:']')\n\n when :'^'\n if @s.peek == '='\n @s.consume\n create_token(:prefix_match)\n else\n create_token(:delim, :value => char)\n end\n\n when :'{'\n create_token(:'{')\n\n when :'}'\n create_token(:'}')\n\n when :U, :u\n if @s.peek(2) =~ RE_UNICODE_RANGE_START\n @s.consume\n consume_unicode_range\n else\n @s.reconsume\n consume_ident\n end\n\n when :|\n case @s.peek\n when '='\n @s.consume\n create_token(:dash_match)\n\n when '|'\n @s.consume\n create_token(:column)\n\n else\n 
create_token(:delim, :value => char)\n end\n\n when :~\n if @s.peek == '='\n @s.consume\n create_token(:include_match)\n else\n create_token(:delim, :value => char)\n end\n\n else\n case char\n when RE_DIGIT\n @s.reconsume\n consume_numeric\n\n when RE_NAME_START\n @s.reconsume\n consume_ident\n\n else\n create_token(:delim, :value => char)\n end\n end\n end", "def no_modulo(i_dividend, i_divisor)\n a = i_dividend / i_divisor\n b = a * i_divisor\n c = i_dividend - b\nend", "def fdiv(arg0)\n end", "def comment(_lexeme, character)\n if character =~ /./\n :comment\n else\n :default\n end\n end", "def process_character( node )\n result = nil\n \n case node.type.name\n when \"general_character\"\n result = node.text.codepoints[0]\n \n when \"escape_sequence\"\n if @@escape_sequences.member?(node.text) then\n result = @@escape_sequences[node.text].codepoints[0]\n else\n result = node.text.codepoints[1]\n end\n \n when \"unicode_sequence\"\n result = node.text.slice(2..-1).to_i(16)\n\n else\n nyi( \"support for node type [#{node.type}]\", node )\n end\n \n return result\n end", "def parse_factor\n case current\n when :L_PARANTH then\n expect(:L_PARANTH)\n exp = parse_expression\n expect(:R_PARANTH)\n\n exp\n when :ID then\n parse_variable_exp\n when :KW_NEW then\n parse_class_init\n else\n parse_constant\n end\n end", "def divide\n match \"/\"\n comment \"/\"\n factor\n emitln \"movl %eax, %ebx\"\n emitln \"movl -(0x8*#{$stackdepth})(%rsp), %eax\"\n emitln \"cltd\"\n emitln \"idivl %ebx\"\nend", "def divmod(arg0)\n end", "def divmod(arg0)\n end", "def parse(tokens)\n tokens.delete_if { |t| t == '*' } # multiplication is the default\n tokens.map! do |token|\n # singularize, but make sure not to replace seconds with ''\n token == 's' ? token : token.singularize\n end\n\n # only supports one instance of division\n division = tokens.index('/')\n tokens.delete('/')\n\n # units :: [CUnit]\n units = tokens.map do |token|\n unit = Unit.find_by unit_name: token\n if unit.nil?\n unit = Unit.where('? 
= ANY(symbols)', token).first\n end\n if unit.nil?\n raise \"No unit found named #{token}.\"\n end\n CUnit.new(unit, 1)\n end\n\n unless division.nil?\n numerator = units[0...division]\n denominator = units[division..-1].map do |u|\n u.exponent *= -1\n u\n end\n\n units == numerator + denominator\n end\n\n return units\n end", "def /(rhs)\n self.class.from_ptr(C.LLVMConstFDiv(self, rhs))\n end", "def css_def_pos(css, pos, depth = -1)\n to = open_brace_pos(css, pos, depth)\n prev_def = to - (css[0..to].reverse.index('}') || to) + 1\n from = prev_def + 1 + (css[prev_def + 1..-1] =~ %r(^\\s*[^\\s/]))\n (from..to - 1)\n end", "def div\n x, y = stack.pop(2)\n push x / y\n end", "def _HtmlUnclosedType\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\"HR\")\n break if _tmp\n self.pos = _save\n _tmp = match_string(\"hr\")\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_HtmlUnclosedType unless _tmp\n return _tmp\n end", "def DISABLED_test_decimal_divide_sequence\n assert_tokenises_as '2./2.0', DecimalToken.new(2), DivideOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0/-2.', DecimalToken.new(2), DivideOpToken.instance, DecimalToken.new(-2)\n end", "def selector_for_pos(css, pos, depth = -1)\n css[css_def_pos(css, pos, depth)].dup.strip\n end", "def token\n ready_token\n\n i = @buffer.index(/[\\[\\]()<>{}\\s\\/]/) || @buffer.size\n\n token_chars =\n if i == 0 and @buffer[i,2] == \"<<\" then 2\n elsif i == 0 and @buffer[i,2] == \">>\" then 2\n elsif i == 0 then 1\n else i\n end\n\n strip_space = !(i == 0 and @buffer[0,1] == '(')\n tok = head(token_chars, strip_space)\n\n if tok == \"\"\n nil\n elsif tok[0,1] == \"%\"\n @buffer = \"\"\n token\n else\n tok\n end\n end", "def parse_char_group\n if res = @s.scan(/\\\\[dDwWhHsS]|\\./)\n CharGroupPredef.new Token.new(\"char-group.predef\", res)\n elsif res = @s.scan(/\\\\p\\{[A-Z][a-z]*\\}/)\n raise \"unicode char class not supported: #{@s.inspect}\"\n else\n beg = @s.scan(/\\[\\^?/)\n if !beg\n return\n end\n if @s.scan(/(?=[^\\]]+\\z)/)\n raise \"char group not closed: #{@s.inspect}\"\n end\n\n classes = []\n c = parse_char_class\n if !c\n raise \"expect char class: #{@s.inspect}\"\n end\n classes << c\n while c = parse_char_class\n classes << c\n end\n\n ed = @s.scan(/\\]/)\n if !ed\n raise \"expect ']': #{@s.inspect}\"\n end\n\n BracketCharGroup.new Token.new(\"begin.char-group\", beg), classes\n end\n end", "def punctuation?\n PUNCTUATION.include? @kind\n end", "def parse_char_group_char\n if c = @s.scan(/\\\\[ftnr]/)\n (eval \"\\\"#{c}\\\"\").ord\n elsif c = @s.scan(/\\\\[^\\n]/)\n c[1].ord\n elsif c = @s.scan(/[^\\n\\\\\\/\\[\\]]/)\n c.ord\n end\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end" ]
[ "0.6782242", "0.5594302", "0.55533934", "0.5289627", "0.52322054", "0.5069714", "0.49227703", "0.486671", "0.48258868", "0.47825617", "0.47686204", "0.47656333", "0.47576725", "0.47445497", "0.47300968", "0.47278628", "0.46645477", "0.46503067", "0.45705864", "0.45445508", "0.45118567", "0.44759315", "0.4474237", "0.44603765", "0.4459673", "0.44000348", "0.43947625", "0.43707713", "0.43566477", "0.43557432", "0.43407312", "0.43405318", "0.43260318", "0.43164667", "0.42994413", "0.42992738", "0.42976925", "0.42921802", "0.4289266", "0.42845282", "0.42652327", "0.42617834", "0.42581496", "0.42511794", "0.425072", "0.4249491", "0.42321467", "0.4232127", "0.42201295", "0.42011538", "0.419638", "0.41944534", "0.41696805", "0.41640916", "0.4159501", "0.41562375", "0.41522685", "0.41497836", "0.4149125", "0.4147313", "0.414104", "0.41311044", "0.4127436", "0.4119463", "0.41120312", "0.4106928", "0.41028932", "0.40941516", "0.40895227", "0.40864107", "0.40864098", "0.40751997", "0.40748176", "0.40743992", "0.4063155", "0.40583974", "0.40571818", "0.40569326", "0.40489316", "0.40392628", "0.40328276", "0.40241402", "0.4024034", "0.40239826", "0.40204257", "0.40202102", "0.40078032", "0.40078032", "0.40049815", "0.40000883", "0.39952764", "0.39884633", "0.39798203", "0.39747566", "0.39730605", "0.39726847", "0.39685446", "0.39667428", "0.39657366", "0.39563024" ]
0.83846706
0
Tests next literal is RegExp or not. If literal is RegExp return ECMA262::ECMA262RegExp object and forward lexical parser position. Otherwise return nil and position is not changed.
def regexp_literal # RegularExpressionLiteral:: # / RegularExpressionBody / RegularExpressionFlags pos0 = @pos return nil unless @codes[@pos] == 0x2f body = regexp_body flags = regexp_flags return ECMA262::ECMA262RegExp.new(body, flags) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regexp(r0, which)\n source, stop_index = r0.source, r0.stop_index\n return factor_result(source, stop_index, stop_index+$&.length) \\\n if source.index(@regexps[which],stop_index)==stop_index\n terminal_parse_failure(r0, which)\n end", "def supports_regexp?\n true\n end", "def supports_regexp?\n true\n end", "def lexer_regexp; LEXER_REGEXP end", "def regex_token\n return if @chunk[0] != '/'\n length = heregex_token\n return length if length\n prev = @tokens[-1]\n return if prev && (prev.spaced ? NOT_REGEX : NOT_SPACED_REGEX).include?(prev[0])\n return unless match = @chunk.match(REGEX)\n match, regex, flags = *match\n # Avoid conflicts with floor division operator.\n return if regex == '//'\n if regex[0..1] == '/*'\n raise_syntax_error! 'regular expressions cannot begin with `*`'\n end\n token(:REGEX, \"#{regex}#{flags}\", 0, match.size)\n match.size\n end", "def regexp\n @regexp ||= Regexp.compile(source.to_s, Regexp::IGNORECASE)\n end", "def match(regexp); end", "def regexp; end", "def regexp; end", "def regexp=(_arg0); end", "def lex_en_regexp_modifiers; end", "def lex_en_regexp_modifiers; end", "def lex_en_regexp_modifiers; end", "def lex_en_regexp_modifiers=(_arg0); end", "def lex_en_regexp_modifiers=(_arg0); end", "def lex_en_regexp_modifiers=(_arg0); end", "def test_regexp\n# (find-node \"(emacs-ja)Regexps\")\n \n conv = lambda{|from,to| assert_equal(to, el4r_conv_regexp(from)) }\n conv[ //, '' ]\n conv[ /a/, 'a' ]\n conv[ /a./, 'a.' ]\n conv[ /a*/, 'a*' ]\n conv[ /a+/, 'a+' ]\n conv[ /a?/, 'a?' ]\n conv[ /[ab]/, '[ab]' ]\n conv[ /[^ab]/, '[^ab]' ]\n conv[ /^ab/, '^ab' ]\n conv[ /ab$/, 'ab$' ]\n conv[ /a|b/, 'a\\|b' ]\n conv[ /(ab)/, '\\(ab\\)' ]\n conv[ /\\As/, '\\`s' ]\n conv[ /s\\Z/, %q[s\\'] ]\n # \\=\n conv[ /\\bball\\B/, '\\bball\\B']\n # \\<\n # \\>\n conv[ /\\w/, '[0-9A-Za-z_]']\n conv[ /\\W/, '[^0-9A-Za-z_]']\n # \\sC\n # \\SC\n # \\D (number)\n end", "def next_regex regex\n if regex.is_a? 
Symbol\n regex = @text_patterns[regex]\n raise \"Pattern specified #{regex} does not exist in text_patterns \" unless regex\n end\n @last_regex = regex\n find_more\n end", "def match re\n unless re.inspect[1..2] == \"\\\\A\"\n a = re.inspect\n a[0] = \"/\\\\A\"\n re = eval(a)\n end\n @last_match = remaining.match(re)\n end", "def to_regexp\n case expr.first\n when :hex\n Regexp.new(translate_codepoints(expr[1]))\n when :istr\n /#{expr.last}/ui\n when :range\n Regexp.new(\"[#{translate_codepoints(expr[1])}]\")\n else\n raise \"Can't turn #{expr.inspect} into a regexp\"\n end\n end", "def regexp_with_working_captures?(node); end", "def regex?\n @name.is_a?(Regexp)\n end", "def regex?\n @name.is_a?(Regexp)\n end", "def convert_to_regexp(name, regexp)\n return regexp if Regexp == regexp\n begin\n return Regexp.new(regexp)\n rescue\n @errors << \"Invalid pattern: #{name} regexp: #{regexp} error: #{$!}\"\n end\n end", "def regex(_obj)\n raise NotImplementedError\n end", "def code_regexp\n unless defined?(@code_regexp)\n @code_regexp = nil\n\n if reg_str = vocab_entry.format_regexp\n @code_regexp = Regexp.union(Regexp.new(reg_str, Regexp::IGNORECASE), /\\A\\*\\Z/)\n end\n end\n @code_regexp\n end", "def to_regexp\n\t\t\treturn @regexp\n\t\tend", "def regex\n Regexp.new(@str)\n end", "def to_regexp\n Regexp.new(@value)\n end", "def regexp(exp)\n begin\n match = exp.match(@data)\n yield match if match\n rescue ArgumentError # velmi zriedkava UTF8 chyba\n end\n end", "def re; end", "def start_re; end", "def regexp\n return @regexp if @regexp\n placeholder = '___PLACEHOLDER___'\n @regexp = @base_theme.dup\n # Install placeholders for the variable data\n @regexp.gsub!(VARIABLE_MATCHER) { placeholder }\n # Strip the header comments\n @regexp.gsub! /.*^\\*\\/\\s*/m, ''\n # Collapse all whitespace\n @regexp.gsub! /\\s+/, ' '\n # Escape the literal strings\n @regexp = Regexp.escape(@regexp)\n # Whitespace means nothing\n @regexp.gsub! /\\\\\\ /, '\\s+'\n # Fast variable finder\n @regexp.gsub! placeholder, '([^;]*|\\S*)'\n # Get 'er done\n @regexp = Regexp.new(@regexp)\n end", "def =~(str)\n str = str.to_s if str.is_a?(Symbol)\n # unless str.nil? because it's nil and only nil, not false.\n str = StringValue(str) unless str.nil?\n\n match = match_from(str, 0)\n if match\n Regexp.last_match = match\n return match.begin(0)\n else\n Regexp.last_match = nil\n return nil\n end\n end", "def regex_value\n regex.random_example\n rescue RegexpExamples::IllegalSyntaxError\n nil\n end", "def init_regex(regex)\n optimized_regex=/\\A#{regex}/ # anchor the search\n self.parser=lambda do |parent_node|\n offset = parent_node.next\n if parent_node.src[offset..-1].index(optimized_regex)==0\n range = $~.offset(0)\n range = (range.min+offset)..(range.max+offset)\n TerminalNode.new(parent_node,range,regex)\n end\n end\n self.terminal=true\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? 
send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def check_assignment_or_regexp(assignment, regexp)\n return @assigned_paragraph_type == assignment if @assigned_paragraph_type\n return @line =~ regexp\n end", "def find_regex_token\n best_token = nil\n best_length = 0\n\n # I tried optimizing based on the first char, but it had\n # a slightly negative affect and was a good bit more complicated.\n TOKENS.regex_tokens.each do |token|\n if length = @scanner.match?(token.regex) and token.acceptable?(lexing_context)\n # We've found a longer match\n if length > best_length\n best_length = length\n best_token = token\n end\n end\n end\n\n return best_token, @scanner.scan(best_token.regex) if best_token\n end", "def match(str=nil)\n return DelayedMatchConstructor.new unless str\n \n return Atoms::Re.new(str)\n end", "def match?(regexp, string, pos = 0)\n !!regexp.match(string, pos)\n end", "def regexp_matcher regexp\n lambda do |string, index = 0, counts:|\n found = regexp.match(string, index)\n result_string = found.to_s\n\n if found && found.begin(0) == index && !result_string.empty?\n result_string\n end\n end\n end", "def positional_match_or_nil(source, re, position)\n md = source.match(re)\n matched_substr = md && md[position]\n yield(matched_substr) if matched_substr\n matched_substr\n end", "def regexp\n s = Regexp.escape self\n s.gsub! /\\\\\\?/, '[^/]'\n s.gsub! /\\\\\\*/, '[^/]*'\n s.gsub! /\\\\\\[!/, '[^'\n s.gsub! /\\\\\\]/, ']'\n s.gsub! /\\\\\\{/, '('\n s.gsub! /,/, '|'\n s.gsub! /\\\\\\}/, ')'\n Regexp.new s\n end", "def expression\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 28 )\n return_value = ExpressionReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n char_literal106 = nil\n char_literal110 = nil\n string_literal113 = nil\n __SLASH_ASGN116__ = nil\n __MOD_ASGN119__ = nil\n string_literal122 = nil\n string_literal125 = nil\n string_literal128 = nil\n string_literal131 = nil\n string_literal134 = nil\n string_literal137 = nil\n string_literal140 = nil\n string_literal143 = nil\n string_literal146 = nil\n char_literal149 = nil\n string_literal152 = nil\n char_literal155 = nil\n char_literal158 = nil\n string_literal161 = nil\n string_literal164 = nil\n string_literal167 = nil\n char_literal170 = nil\n char_literal173 = nil\n __MOD176__ = nil\n __SLASH179__ = nil\n char_literal182 = nil\n string_literal185 = nil\n string_literal188 = nil\n string_literal191 = nil\n string_literal194 = nil\n string_literal197 = nil\n string_literal200 = nil\n char_literal203 = nil\n char_literal206 = nil\n string_literal209 = nil\n string_literal212 = nil\n string_literal215 = nil\n string_literal217 = nil\n string_literal219 = nil\n string_literal221 = nil\n string_literal223 = nil\n __UPLUS225__ = nil\n __UMINUS227__ = nil\n char_literal229 = nil\n char_literal231 = nil\n __POST_INCR233__ = nil\n __POST_DECR235__ = nil\n __AREF237__ = nil\n char_literal240 = nil\n __CALL243__ = nil\n __ITER246__ = nil\n string_literal249 = nil\n expression107 = nil\n expression108 = nil\n expression109 = nil\n expression111 = nil\n expression112 = nil\n expression114 = nil\n expression115 = nil\n expression117 = nil\n expression118 = nil\n expression120 = nil\n expression121 = nil\n expression123 = nil\n expression124 = nil\n expression126 = nil\n expression127 = nil\n expression129 = nil\n expression130 = nil\n expression132 = nil\n expression133 = 
nil\n expression135 = nil\n expression136 = nil\n expression138 = nil\n expression139 = nil\n expression141 = nil\n expression142 = nil\n expression144 = nil\n expression145 = nil\n expression147 = nil\n expression148 = nil\n expression150 = nil\n expression151 = nil\n expression153 = nil\n expression154 = nil\n expression156 = nil\n expression157 = nil\n expression159 = nil\n expression160 = nil\n expression162 = nil\n expression163 = nil\n expression165 = nil\n expression166 = nil\n expression168 = nil\n expression169 = nil\n expression171 = nil\n expression172 = nil\n expression174 = nil\n expression175 = nil\n expression177 = nil\n expression178 = nil\n expression180 = nil\n expression181 = nil\n expression183 = nil\n expression184 = nil\n expression186 = nil\n expression187 = nil\n expression189 = nil\n expression190 = nil\n expression192 = nil\n expression193 = nil\n expression195 = nil\n expression196 = nil\n expression198 = nil\n expression199 = nil\n expression201 = nil\n expression202 = nil\n expression204 = nil\n expression205 = nil\n expression207 = nil\n expression208 = nil\n expression210 = nil\n expression211 = nil\n expression213 = nil\n expression214 = nil\n expression216 = nil\n expression218 = nil\n expression220 = nil\n expression222 = nil\n expression224 = nil\n expression226 = nil\n expression228 = nil\n expression230 = nil\n expression232 = nil\n expression234 = nil\n expression236 = nil\n expression238 = nil\n expression239 = nil\n expression241 = nil\n property_name242 = nil\n expression244 = nil\n arguments245 = nil\n parameters247 = nil\n block248 = nil\n expression250 = nil\n arguments251 = nil\n literal252 = nil\n\n tree_for_char_literal106 = nil\n tree_for_char_literal110 = nil\n tree_for_string_literal113 = nil\n tree_for_SLASH_ASGN116 = nil\n tree_for_MOD_ASGN119 = nil\n tree_for_string_literal122 = nil\n tree_for_string_literal125 = nil\n tree_for_string_literal128 = nil\n tree_for_string_literal131 = nil\n tree_for_string_literal134 = nil\n tree_for_string_literal137 = nil\n tree_for_string_literal140 = nil\n tree_for_string_literal143 = nil\n tree_for_string_literal146 = nil\n tree_for_char_literal149 = nil\n tree_for_string_literal152 = nil\n tree_for_char_literal155 = nil\n tree_for_char_literal158 = nil\n tree_for_string_literal161 = nil\n tree_for_string_literal164 = nil\n tree_for_string_literal167 = nil\n tree_for_char_literal170 = nil\n tree_for_char_literal173 = nil\n tree_for_MOD176 = nil\n tree_for_SLASH179 = nil\n tree_for_char_literal182 = nil\n tree_for_string_literal185 = nil\n tree_for_string_literal188 = nil\n tree_for_string_literal191 = nil\n tree_for_string_literal194 = nil\n tree_for_string_literal197 = nil\n tree_for_string_literal200 = nil\n tree_for_char_literal203 = nil\n tree_for_char_literal206 = nil\n tree_for_string_literal209 = nil\n tree_for_string_literal212 = nil\n tree_for_string_literal215 = nil\n tree_for_string_literal217 = nil\n tree_for_string_literal219 = nil\n tree_for_string_literal221 = nil\n tree_for_string_literal223 = nil\n tree_for_UPLUS225 = nil\n tree_for_UMINUS227 = nil\n tree_for_char_literal229 = nil\n tree_for_char_literal231 = nil\n tree_for_POST_INCR233 = nil\n tree_for_POST_DECR235 = nil\n tree_for_AREF237 = nil\n tree_for_char_literal240 = nil\n tree_for_CALL243 = nil\n tree_for_ITER246 = nil\n tree_for_string_literal249 = nil\n\n begin\n # at line 150:3: ( ^( '?' 
expression expression expression ) | ^( '=' expression expression ) | ^( '*=' expression expression ) | ^( SLASH_ASGN expression expression ) | ^( MOD_ASGN expression expression ) | ^( '+=' expression expression ) | ^( '-=' expression expression ) | ^( '<<=' expression expression ) | ^( '>>=' expression expression ) | ^( '>>>=' expression expression ) | ^( '&=' expression expression ) | ^( '^=' expression expression ) | ^( '||=' expression expression ) | ^( '|=' expression expression ) | ^( '|' expression expression ) | ^( '||' expression expression ) | ^( '&' expression expression ) | ^( '^' expression expression ) | ^( '>>' expression expression ) | ^( '<<' expression expression ) | ^( '>>>' expression expression ) | ^( '-' expression expression ) | ^( '+' expression expression ) | ^( MOD expression expression ) | ^( SLASH expression expression ) | ^( '*' expression expression ) | ^( '==' expression expression ) | ^( '===' expression expression ) | ^( '!=' expression expression ) | ^( '!==' expression expression ) | ^( '>=' expression expression ) | ^( '<=' expression expression ) | ^( '>' expression expression ) | ^( '<' expression expression ) | ^( 'instanceof' expression expression ) | ^( 'in' expression expression ) | ^( 'delete' expression ) | ^( 'void' expression ) | ^( 'typeof' expression ) | ^( '++' expression ) | ^( '--' expression ) | ^( UPLUS expression ) | ^( UMINUS expression ) | ^( '~' expression ) | ^( '!' expression ) | ^( POST_INCR expression ) | ^( POST_DECR expression ) | ^( AREF expression expression ) | ^( '.' expression property_name ) | ^( CALL expression arguments ) | ^( ITER parameters block ) | ^( 'new' expression ( arguments )? ) | literal )\n alt_32 = 53\n case look_32 = @input.peek( 1 )\n when QMARK then alt_32 = 1\n when ASGN then alt_32 = 2\n when STAR_ASGN then alt_32 = 3\n when SLASH_ASGN then alt_32 = 4\n when MOD_ASGN then alt_32 = 5\n when PLUS_ASGN then alt_32 = 6\n when MINUS_ASGN then alt_32 = 7\n when LSHIFT_ASGN then alt_32 = 8\n when RSHIFT_ASGN then alt_32 = 9\n when RSHIFT3_ASGN then alt_32 = 10\n when AMP_ASGN then alt_32 = 11\n when HAT_ASGN then alt_32 = 12\n when OR_ASGN then alt_32 = 13\n when PIPE_ASGN then alt_32 = 14\n when PIPE then alt_32 = 15\n when OR then alt_32 = 16\n when AMP then alt_32 = 17\n when HAT then alt_32 = 18\n when RSHIFT then alt_32 = 19\n when LSHIFT then alt_32 = 20\n when RSHIFT3 then alt_32 = 21\n when MINUS then alt_32 = 22\n when PLUS then alt_32 = 23\n when MOD then alt_32 = 24\n when SLASH then alt_32 = 25\n when STAR then alt_32 = 26\n when EQ then alt_32 = 27\n when EQQ then alt_32 = 28\n when NEQ then alt_32 = 29\n when NEQQ then alt_32 = 30\n when GEQ then alt_32 = 31\n when LEQ then alt_32 = 32\n when GREATER then alt_32 = 33\n when LESS then alt_32 = 34\n when INSTANCEOF then alt_32 = 35\n when IN then alt_32 = 36\n when DELETE then alt_32 = 37\n when VOID then alt_32 = 38\n when TYPEOF then alt_32 = 39\n when INCR then alt_32 = 40\n when DECR then alt_32 = 41\n when UPLUS then alt_32 = 42\n when UMINUS then alt_32 = 43\n when TILDE then alt_32 = 44\n when NOT then alt_32 = 45\n when POST_INCR then alt_32 = 46\n when POST_DECR then alt_32 = 47\n when AREF then alt_32 = 48\n when DOT then alt_32 = 49\n when CALL then alt_32 = 50\n when ITER then alt_32 = 51\n when NEW then alt_32 = 52\n when FUNCTION, ARRAY, ARROW, REGEX, THIS, TRUE, UNDEFINED, NULL, OBJECT, FALSE, ID, IVAR, NUMBER, STRING, DOC then alt_32 = 53\n else\n raise NoViableAlternative( \"\", 32, 0 )\n end\n case alt_32\n when 1\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 150:5: ^( '?' expression expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal106 = match( QMARK, TOKENS_FOLLOWING_QMARK_IN_expression_813 )\n\n tree_for_char_literal106 = @adaptor.copy_node( char_literal106 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal106, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_815 )\n expression107 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression107.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_817 )\n expression108 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression108.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_819 )\n expression109 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression109.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 151:5: ^( '=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal110 = match( ASGN, TOKENS_FOLLOWING_ASGN_IN_expression_829 )\n\n tree_for_char_literal110 = @adaptor.copy_node( char_literal110 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal110, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_831 )\n expression111 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression111.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_833 )\n expression112 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression112.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 152:5: ^( '*=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal113 = match( STAR_ASGN, TOKENS_FOLLOWING_STAR_ASGN_IN_expression_843 )\n\n tree_for_string_literal113 = @adaptor.copy_node( string_literal113 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal113, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_845 )\n expression114 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression114.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_847 )\n expression115 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression115.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 153:5: ^( SLASH_ASGN expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __SLASH_ASGN116__ = match( SLASH_ASGN, TOKENS_FOLLOWING_SLASH_ASGN_IN_expression_857 )\n\n tree_for_SLASH_ASGN116 = @adaptor.copy_node( __SLASH_ASGN116__ )\n\n root_1 = @adaptor.become_root( tree_for_SLASH_ASGN116, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n 
@state.following.push( TOKENS_FOLLOWING_expression_IN_expression_859 )\n expression117 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression117.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_861 )\n expression118 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression118.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 154:5: ^( MOD_ASGN expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __MOD_ASGN119__ = match( MOD_ASGN, TOKENS_FOLLOWING_MOD_ASGN_IN_expression_871 )\n\n tree_for_MOD_ASGN119 = @adaptor.copy_node( __MOD_ASGN119__ )\n\n root_1 = @adaptor.become_root( tree_for_MOD_ASGN119, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_873 )\n expression120 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression120.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_875 )\n expression121 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression121.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 155:5: ^( '+=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal122 = match( PLUS_ASGN, TOKENS_FOLLOWING_PLUS_ASGN_IN_expression_885 )\n\n tree_for_string_literal122 = @adaptor.copy_node( string_literal122 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal122, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_887 )\n expression123 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression123.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_889 )\n expression124 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression124.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 156:5: ^( '-=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal125 = match( MINUS_ASGN, TOKENS_FOLLOWING_MINUS_ASGN_IN_expression_899 )\n\n tree_for_string_literal125 = @adaptor.copy_node( string_literal125 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal125, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_901 )\n expression126 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression126.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_903 )\n expression127 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression127.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 157:5: ^( '<<=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = 
@input.look\n string_literal128 = match( LSHIFT_ASGN, TOKENS_FOLLOWING_LSHIFT_ASGN_IN_expression_913 )\n\n tree_for_string_literal128 = @adaptor.copy_node( string_literal128 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal128, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_915 )\n expression129 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression129.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_917 )\n expression130 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression130.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 158:5: ^( '>>=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal131 = match( RSHIFT_ASGN, TOKENS_FOLLOWING_RSHIFT_ASGN_IN_expression_927 )\n\n tree_for_string_literal131 = @adaptor.copy_node( string_literal131 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal131, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_929 )\n expression132 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression132.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_931 )\n expression133 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression133.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 159:5: ^( '>>>=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal134 = match( RSHIFT3_ASGN, TOKENS_FOLLOWING_RSHIFT3_ASGN_IN_expression_941 )\n\n tree_for_string_literal134 = @adaptor.copy_node( string_literal134 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal134, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_943 )\n expression135 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression135.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_945 )\n expression136 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression136.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 160:5: ^( '&=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal137 = match( AMP_ASGN, TOKENS_FOLLOWING_AMP_ASGN_IN_expression_955 )\n\n tree_for_string_literal137 = @adaptor.copy_node( string_literal137 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal137, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_957 )\n expression138 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression138.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_959 )\n expression139 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, 
expression139.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 161:5: ^( '^=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal140 = match( HAT_ASGN, TOKENS_FOLLOWING_HAT_ASGN_IN_expression_969 )\n\n tree_for_string_literal140 = @adaptor.copy_node( string_literal140 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal140, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_971 )\n expression141 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression141.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_973 )\n expression142 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression142.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 162:5: ^( '||=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal143 = match( OR_ASGN, TOKENS_FOLLOWING_OR_ASGN_IN_expression_983 )\n\n tree_for_string_literal143 = @adaptor.copy_node( string_literal143 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal143, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_985 )\n expression144 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression144.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_987 )\n expression145 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression145.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 163:5: ^( '|=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal146 = match( PIPE_ASGN, TOKENS_FOLLOWING_PIPE_ASGN_IN_expression_997 )\n\n tree_for_string_literal146 = @adaptor.copy_node( string_literal146 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal146, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_999 )\n expression147 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression147.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1001 )\n expression148 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression148.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 164:5: ^( '|' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal149 = match( PIPE, TOKENS_FOLLOWING_PIPE_IN_expression_1011 )\n\n tree_for_char_literal149 = @adaptor.copy_node( char_literal149 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal149, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1013 )\n 
expression150 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression150.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1015 )\n expression151 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression151.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 16\n root_0 = @adaptor.create_flat_list\n\n\n # at line 165:5: ^( '||' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal152 = match( OR, TOKENS_FOLLOWING_OR_IN_expression_1025 )\n\n tree_for_string_literal152 = @adaptor.copy_node( string_literal152 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal152, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1027 )\n expression153 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression153.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1029 )\n expression154 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression154.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 17\n root_0 = @adaptor.create_flat_list\n\n\n # at line 166:5: ^( '&' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal155 = match( AMP, TOKENS_FOLLOWING_AMP_IN_expression_1039 )\n\n tree_for_char_literal155 = @adaptor.copy_node( char_literal155 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal155, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1041 )\n expression156 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression156.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1043 )\n expression157 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression157.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 18\n root_0 = @adaptor.create_flat_list\n\n\n # at line 167:5: ^( '^' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal158 = match( HAT, TOKENS_FOLLOWING_HAT_IN_expression_1053 )\n\n tree_for_char_literal158 = @adaptor.copy_node( char_literal158 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal158, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1055 )\n expression159 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression159.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1057 )\n expression160 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression160.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 19\n root_0 = @adaptor.create_flat_list\n\n\n # at line 168:5: ^( '>>' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal161 = match( RSHIFT, TOKENS_FOLLOWING_RSHIFT_IN_expression_1067 )\n\n 
tree_for_string_literal161 = @adaptor.copy_node( string_literal161 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal161, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1069 )\n expression162 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression162.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1071 )\n expression163 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression163.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 20\n root_0 = @adaptor.create_flat_list\n\n\n # at line 169:5: ^( '<<' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal164 = match( LSHIFT, TOKENS_FOLLOWING_LSHIFT_IN_expression_1081 )\n\n tree_for_string_literal164 = @adaptor.copy_node( string_literal164 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal164, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1083 )\n expression165 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression165.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1085 )\n expression166 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression166.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 21\n root_0 = @adaptor.create_flat_list\n\n\n # at line 170:5: ^( '>>>' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal167 = match( RSHIFT3, TOKENS_FOLLOWING_RSHIFT3_IN_expression_1095 )\n\n tree_for_string_literal167 = @adaptor.copy_node( string_literal167 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal167, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1097 )\n expression168 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression168.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1099 )\n expression169 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression169.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 22\n root_0 = @adaptor.create_flat_list\n\n\n # at line 171:5: ^( '-' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal170 = match( MINUS, TOKENS_FOLLOWING_MINUS_IN_expression_1109 )\n\n tree_for_char_literal170 = @adaptor.copy_node( char_literal170 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal170, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1111 )\n expression171 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression171.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1113 )\n expression172 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression172.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 23\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 172:5: ^( '+' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal173 = match( PLUS, TOKENS_FOLLOWING_PLUS_IN_expression_1123 )\n\n tree_for_char_literal173 = @adaptor.copy_node( char_literal173 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal173, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1125 )\n expression174 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression174.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1127 )\n expression175 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression175.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 24\n root_0 = @adaptor.create_flat_list\n\n\n # at line 173:5: ^( MOD expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __MOD176__ = match( MOD, TOKENS_FOLLOWING_MOD_IN_expression_1137 )\n\n tree_for_MOD176 = @adaptor.copy_node( __MOD176__ )\n\n root_1 = @adaptor.become_root( tree_for_MOD176, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1139 )\n expression177 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression177.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1141 )\n expression178 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression178.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 25\n root_0 = @adaptor.create_flat_list\n\n\n # at line 174:5: ^( SLASH expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __SLASH179__ = match( SLASH, TOKENS_FOLLOWING_SLASH_IN_expression_1151 )\n\n tree_for_SLASH179 = @adaptor.copy_node( __SLASH179__ )\n\n root_1 = @adaptor.become_root( tree_for_SLASH179, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1153 )\n expression180 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression180.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1155 )\n expression181 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression181.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 26\n root_0 = @adaptor.create_flat_list\n\n\n # at line 175:5: ^( '*' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal182 = match( STAR, TOKENS_FOLLOWING_STAR_IN_expression_1165 )\n\n tree_for_char_literal182 = @adaptor.copy_node( char_literal182 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal182, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1167 )\n expression183 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression183.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1169 )\n expression184 = 
expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression184.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 27\n root_0 = @adaptor.create_flat_list\n\n\n # at line 176:5: ^( '==' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal185 = match( EQ, TOKENS_FOLLOWING_EQ_IN_expression_1179 )\n\n tree_for_string_literal185 = @adaptor.copy_node( string_literal185 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal185, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1181 )\n expression186 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression186.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1183 )\n expression187 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression187.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 28\n root_0 = @adaptor.create_flat_list\n\n\n # at line 177:5: ^( '===' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal188 = match( EQQ, TOKENS_FOLLOWING_EQQ_IN_expression_1193 )\n\n tree_for_string_literal188 = @adaptor.copy_node( string_literal188 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal188, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1195 )\n expression189 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression189.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1197 )\n expression190 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression190.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 29\n root_0 = @adaptor.create_flat_list\n\n\n # at line 178:5: ^( '!=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal191 = match( NEQ, TOKENS_FOLLOWING_NEQ_IN_expression_1207 )\n\n tree_for_string_literal191 = @adaptor.copy_node( string_literal191 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal191, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1209 )\n expression192 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression192.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1211 )\n expression193 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression193.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 30\n root_0 = @adaptor.create_flat_list\n\n\n # at line 179:5: ^( '!==' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal194 = match( NEQQ, TOKENS_FOLLOWING_NEQQ_IN_expression_1221 )\n\n tree_for_string_literal194 = @adaptor.copy_node( string_literal194 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal194, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( 
TOKENS_FOLLOWING_expression_IN_expression_1223 )\n expression195 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression195.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1225 )\n expression196 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression196.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 31\n root_0 = @adaptor.create_flat_list\n\n\n # at line 180:5: ^( '>=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal197 = match( GEQ, TOKENS_FOLLOWING_GEQ_IN_expression_1235 )\n\n tree_for_string_literal197 = @adaptor.copy_node( string_literal197 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal197, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1237 )\n expression198 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression198.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1239 )\n expression199 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression199.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 32\n root_0 = @adaptor.create_flat_list\n\n\n # at line 181:5: ^( '<=' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal200 = match( LEQ, TOKENS_FOLLOWING_LEQ_IN_expression_1249 )\n\n tree_for_string_literal200 = @adaptor.copy_node( string_literal200 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal200, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1251 )\n expression201 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression201.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1253 )\n expression202 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression202.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 33\n root_0 = @adaptor.create_flat_list\n\n\n # at line 182:5: ^( '>' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal203 = match( GREATER, TOKENS_FOLLOWING_GREATER_IN_expression_1263 )\n\n tree_for_char_literal203 = @adaptor.copy_node( char_literal203 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal203, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1265 )\n expression204 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression204.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1267 )\n expression205 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression205.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 34\n root_0 = @adaptor.create_flat_list\n\n\n # at line 183:5: ^( '<' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal206 = match( LESS, 
TOKENS_FOLLOWING_LESS_IN_expression_1277 )\n\n tree_for_char_literal206 = @adaptor.copy_node( char_literal206 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal206, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1279 )\n expression207 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression207.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1281 )\n expression208 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression208.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 35\n root_0 = @adaptor.create_flat_list\n\n\n # at line 184:5: ^( 'instanceof' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal209 = match( INSTANCEOF, TOKENS_FOLLOWING_INSTANCEOF_IN_expression_1291 )\n\n tree_for_string_literal209 = @adaptor.copy_node( string_literal209 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal209, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1293 )\n expression210 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression210.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1295 )\n expression211 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression211.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 36\n root_0 = @adaptor.create_flat_list\n\n\n # at line 185:5: ^( 'in' expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal212 = match( IN, TOKENS_FOLLOWING_IN_IN_expression_1305 )\n\n tree_for_string_literal212 = @adaptor.copy_node( string_literal212 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal212, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1307 )\n expression213 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression213.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1309 )\n expression214 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression214.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 37\n root_0 = @adaptor.create_flat_list\n\n\n # at line 186:5: ^( 'delete' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal215 = match( DELETE, TOKENS_FOLLOWING_DELETE_IN_expression_1319 )\n\n tree_for_string_literal215 = @adaptor.copy_node( string_literal215 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal215, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1321 )\n expression216 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression216.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 38\n root_0 = @adaptor.create_flat_list\n\n\n # at line 187:5: ^( 'void' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = 
@adaptor.create_flat_list\n _last = @input.look\n string_literal217 = match( VOID, TOKENS_FOLLOWING_VOID_IN_expression_1331 )\n\n tree_for_string_literal217 = @adaptor.copy_node( string_literal217 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal217, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1333 )\n expression218 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression218.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 39\n root_0 = @adaptor.create_flat_list\n\n\n # at line 188:5: ^( 'typeof' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal219 = match( TYPEOF, TOKENS_FOLLOWING_TYPEOF_IN_expression_1343 )\n\n tree_for_string_literal219 = @adaptor.copy_node( string_literal219 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal219, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1345 )\n expression220 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression220.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 40\n root_0 = @adaptor.create_flat_list\n\n\n # at line 189:5: ^( '++' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal221 = match( INCR, TOKENS_FOLLOWING_INCR_IN_expression_1355 )\n\n tree_for_string_literal221 = @adaptor.copy_node( string_literal221 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal221, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1357 )\n expression222 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression222.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 41\n root_0 = @adaptor.create_flat_list\n\n\n # at line 190:5: ^( '--' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal223 = match( DECR, TOKENS_FOLLOWING_DECR_IN_expression_1367 )\n\n tree_for_string_literal223 = @adaptor.copy_node( string_literal223 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal223, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1369 )\n expression224 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression224.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 42\n root_0 = @adaptor.create_flat_list\n\n\n # at line 191:5: ^( UPLUS expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __UPLUS225__ = match( UPLUS, TOKENS_FOLLOWING_UPLUS_IN_expression_1379 )\n\n tree_for_UPLUS225 = @adaptor.copy_node( __UPLUS225__ )\n\n root_1 = @adaptor.become_root( tree_for_UPLUS225, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1381 )\n expression226 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression226.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 
43\n root_0 = @adaptor.create_flat_list\n\n\n # at line 192:5: ^( UMINUS expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __UMINUS227__ = match( UMINUS, TOKENS_FOLLOWING_UMINUS_IN_expression_1391 )\n\n tree_for_UMINUS227 = @adaptor.copy_node( __UMINUS227__ )\n\n root_1 = @adaptor.become_root( tree_for_UMINUS227, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1393 )\n expression228 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression228.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 44\n root_0 = @adaptor.create_flat_list\n\n\n # at line 193:5: ^( '~' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal229 = match( TILDE, TOKENS_FOLLOWING_TILDE_IN_expression_1403 )\n\n tree_for_char_literal229 = @adaptor.copy_node( char_literal229 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal229, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1405 )\n expression230 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression230.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 45\n root_0 = @adaptor.create_flat_list\n\n\n # at line 194:5: ^( '!' expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal231 = match( NOT, TOKENS_FOLLOWING_NOT_IN_expression_1415 )\n\n tree_for_char_literal231 = @adaptor.copy_node( char_literal231 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal231, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1417 )\n expression232 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression232.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 46\n root_0 = @adaptor.create_flat_list\n\n\n # at line 195:5: ^( POST_INCR expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __POST_INCR233__ = match( POST_INCR, TOKENS_FOLLOWING_POST_INCR_IN_expression_1427 )\n\n tree_for_POST_INCR233 = @adaptor.copy_node( __POST_INCR233__ )\n\n root_1 = @adaptor.become_root( tree_for_POST_INCR233, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1429 )\n expression234 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression234.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 47\n root_0 = @adaptor.create_flat_list\n\n\n # at line 196:5: ^( POST_DECR expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __POST_DECR235__ = match( POST_DECR, TOKENS_FOLLOWING_POST_DECR_IN_expression_1439 )\n\n tree_for_POST_DECR235 = @adaptor.copy_node( __POST_DECR235__ )\n\n root_1 = @adaptor.become_root( tree_for_POST_DECR235, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1441 )\n expression236 = expression\n @state.following.pop\n\n 
@adaptor.add_child( root_1, expression236.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 48\n root_0 = @adaptor.create_flat_list\n\n\n # at line 197:5: ^( AREF expression expression )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __AREF237__ = match( AREF, TOKENS_FOLLOWING_AREF_IN_expression_1451 )\n\n tree_for_AREF237 = @adaptor.copy_node( __AREF237__ )\n\n root_1 = @adaptor.become_root( tree_for_AREF237, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1453 )\n expression238 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression238.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1455 )\n expression239 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression239.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 49\n root_0 = @adaptor.create_flat_list\n\n\n # at line 198:5: ^( '.' expression property_name )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n char_literal240 = match( DOT, TOKENS_FOLLOWING_DOT_IN_expression_1465 )\n\n tree_for_char_literal240 = @adaptor.copy_node( char_literal240 )\n\n root_1 = @adaptor.become_root( tree_for_char_literal240, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1467 )\n expression241 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression241.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_name_IN_expression_1469 )\n property_name242 = property_name\n @state.following.pop\n\n @adaptor.add_child( root_1, property_name242.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 50\n root_0 = @adaptor.create_flat_list\n\n\n # at line 199:5: ^( CALL expression arguments )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __CALL243__ = match( CALL, TOKENS_FOLLOWING_CALL_IN_expression_1479 )\n\n tree_for_CALL243 = @adaptor.copy_node( __CALL243__ )\n\n root_1 = @adaptor.become_root( tree_for_CALL243, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1481 )\n expression244 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression244.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_arguments_IN_expression_1483 )\n arguments245 = arguments\n @state.following.pop\n\n @adaptor.add_child( root_1, arguments245.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 51\n root_0 = @adaptor.create_flat_list\n\n\n # at line 200:5: ^( ITER parameters block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ITER246__ = match( ITER, TOKENS_FOLLOWING_ITER_IN_expression_1493 )\n\n tree_for_ITER246 = @adaptor.copy_node( __ITER246__ )\n\n root_1 = @adaptor.become_root( tree_for_ITER246, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_expression_1495 )\n parameters247 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, 
parameters247.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_block_IN_expression_1497 )\n block248 = block\n @state.following.pop\n\n @adaptor.add_child( root_1, block248.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 52\n root_0 = @adaptor.create_flat_list\n\n\n # at line 201:5: ^( 'new' expression ( arguments )? )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal249 = match( NEW, TOKENS_FOLLOWING_NEW_IN_expression_1507 )\n\n tree_for_string_literal249 = @adaptor.copy_node( string_literal249 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal249, root_1 )\n\n\n\n match( DOWN, nil )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_expression_IN_expression_1509 )\n expression250 = expression\n @state.following.pop\n\n @adaptor.add_child( root_1, expression250.tree )\n # at line 201:25: ( arguments )?\n alt_31 = 2\n look_31_0 = @input.peek( 1 )\n\n if ( look_31_0 == ARGUMENTS )\n alt_31 = 1\n end\n case alt_31\n when 1\n # at line 201:25: arguments\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_arguments_IN_expression_1511 )\n arguments251 = arguments\n @state.following.pop\n\n @adaptor.add_child( root_1, arguments251.tree )\n\n\n end\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 53\n root_0 = @adaptor.create_flat_list\n\n\n # at line 202:5: literal\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_literal_IN_expression_1520 )\n literal252 = literal\n @state.following.pop\n\n @adaptor.add_child( root_0, literal252.tree )\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 28 )\n\n end\n \n return return_value\n end", "def regexp?(value, option = nil)\n if option.is_a?(Regexp)\n options = {:regexp => option}\n else\n options = option\n end \n return true if value.nil? and not required(options) \n return false unless value.is_a?(String)\n raise TypeError.new(\"option :regexp is not Regexp.\") unless options[:regexp].is_a?(Regexp)\n return options[:regexp].match(value)\n end", "def monocle(inputString)\n\toptic = Regexp.new(inputString)\n\treturn optic\nend", "def regexp_anchored?(regexp)\n regexp.source =~ /\\A(\\\\A|\\^).*(\\\\Z|\\$)\\Z/m ? 
true : false\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( 
string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: 
argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = @adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? 
statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def nextto(regex)\n buf = \"\"\n while (true)\n\tc = self.nextchar()\n\tif !(regex =~ c).nil? || c == '\\0' || c == '\\n' || c == '\\r'\n\t self.back() if (c != '\\0')\n\t return(buf.chomp())\n\tend\n\tbuf += c\n end\n end", "def parse(node = @tree)\n result = case node\n when Regexp::Expression::Group::Capture\n # Assumes it's going to be parsed in right order\n group(new_group, sequence(*node.expressions.map { |n| parse(n) }))\n when Regexp::Expression::Alternation\n alternative(*node.expressions.map { |n| parse(n) })\n when Regexp::Expression::Assertion::Lookahead\n [:anchor, :lookahead, sequence(*node.expressions.map { |n| parse(n) })]\n when Regexp::Expression::Assertion::NegativeLookahead\n [:anchor, :negative_lookahead, sequence(*node.expressions.map { |n| parse(n) })]\n when Regexp::Expression::Assertion::Lookbehind\n [:anchor, :lookbehind, sequence(*node.expressions.map { |n| parse(n) })]\n when Regexp::Expression::Assertion::NegativeLookbehind\n [:anchor, :negative_lookbehind, sequence(*node.expressions.map { |n| parse(n) })]\n when Regexp::Expression::CharacterSet\n character_set(node.negative?, node.expressions)\n when Regexp::Expression::Subexpression\n # It's annoyingly subtypes a lot\n unless (node.class == Regexp::Expression::Subexpression or\n node.class == Regexp::Expression::Group::Passive or\n node.class == Regexp::Expression::Root or\n node.class == Regexp::Expression::Alternative)\n raise \"Don't know how to deal with #{node.class}\"\n end\n sequence(*node.expressions.map { |n| parse(n) })\n when Regexp::Expression::Literal\n literal(node.text.chars)\n when Regexp::Expression::CharacterType::Base\n character_type(node.text)\n when Regexp::Expression::EscapeSequence::Base\n character_type(node.text)\n when Regexp::Expression::Backreference::Number\n num = node.text[%r[\\A\\\\(\\d+)\\z], 1] or raise \"Parse error\"\n backref(num.to_i)\n when Regexp::Expression::Anchor::BeginningOfString\n [:anchor, :bos]\n when Regexp::Expression::Anchor::EndOfString\n [:anchor, :eos]\n when Regexp::Expression::Anchor::BeginningOfLine\n [:anchor, :bol]\n when Regexp::Expression::Anchor::EndOfLine\n [:anchor, :eol]\n else\n raise 
\"Unknown expression\"\n end\n if node.quantified?\n min = node.quantifier.min\n max = node.quantifier.max\n result = if result[0] == :group\n repeat_group(result, min, max)\n else\n repeat(result, min, max)\n end\n end\n\n result\n end", "def action_char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 53)\n\n \n # - - - - main rule block - - - -\n # at line 567:4: '\\\\'' (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . )* '\\\\''\n match(?\\')\n # at line 567:9: (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . )*\n loop do #loop 14\n alt_14 = 3\n look_14_0 = @input.peek(1)\n\n if (look_14_0.between?(0x0000, ?&) || look_14_0.between?(?(, ?[) || look_14_0.between?(?], 0xFFFF)) \n alt_14 = 1\n elsif (look_14_0 == ?\\\\) \n alt_14 = 2\n\n end\n case alt_14\n when 1\n # at line 567:11: ~ ( '\\\\\\\\' | '\\\\'' )\n if @input.peek(1).between?(0x0000, ?&) || @input.peek(1).between?(?(, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n when 2\n # at line 567:26: '\\\\\\\\' .\n match(?\\\\)\n match_any\n\n else\n break #loop 14\n end\n end\n match(?\\')\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 53)\n\n end", "def regexp?\n behavior.regexp? || behavior.send(:ancestors).any? { |a| a.regexp? }\n end", "def =~(regexp)\n return unless regexp.instance_of?(Regexp)\n if @data.respond_to?(:to_s)\n return @data.to_s =~ regexp\n else\n return nil\n end\n end", "def scan strOrRegexp\n @in.scan /\\s*/ # Skip whitespace\n @match = @in.scan strOrRegexp\n @last_matched_token = @match if @match # Update last matched only if a token was matched\n end", "def match(input)\n regexp.match(input)\n end", "def test_match_string\n lisp = %q((progn\n (switch-to-buffer \"a\")\n\n (save-excursion\n (insert \"abcdefg\\n\")\n (goto-char 1)\n (re-search-forward \"^\\\\\\\\(.+\\\\\\\\)$\")\n )\n (match-string 1)))\n\n ruby = lambda{\n##### [with]\n with(:save_excursion) do\n goto_char 1\n re_search_forward('^\\\\(.+\\\\)$')\n end\n match_string 1\n##### [/with]\n }\n assert_equal(el4r_lisp_eval(lisp), ruby[])\n\n end", "def match?(name, literal) true end", "def to_sp\n regexp_s = self.to_s\n return StringPattern.cache[regexp_s] unless StringPattern.cache[regexp_s].nil?\n regexp = Regexp.new regexp_s\n require \"regexp_parser\"\n default_infinite = StringPattern.default_infinite\n pata = []\n pats = \"\"\n patg = [] # for (aa|bb|cc) group\n set = false\n set_negate = false\n options = []\n capture = false\n\n range = \"\"\n fixed_text = false\n options = regexp.to_s.scan(/\\A\\(\\?([mix]*)\\-[mix]*:/).join.split('')\n last_char = (regexp.to_s.gsub(/\\A\\(\\?[mix]*\\-[mix]*:/, \"\").length) - 2\n Regexp::Scanner.scan regexp do |type, token, text, ts, te|\n if type == :escape\n if token == :dot\n token = :literal\n text = \".\"\n elsif token == :literal and text.size == 2\n text = text[1]\n else\n puts \"Report token not controlled: type: #{type}, token: #{token}, text: '#{text}' [#{ts}..#{te}]\"\n end\n end\n\n unless set || (token == :interval) || (token == :zero_or_one) ||\n (token == :zero_or_more) || (token == :one_or_more) || (pats == \"\")\n if (pats[0] == \"[\") && (pats[-1] == \"]\")\n pats[0] = \"\"\n if (token == :alternation) || !patg.empty?\n if fixed_text\n if patg.size == 0\n patg << (pata.pop + pats.chop)\n else\n patg[-1] += pats.chop\n end\n else\n patg << pats.chop\n end\n else\n if fixed_text\n pata[-1] += pats.chop\n else\n if 
pats.size == 2\n pata << pats.chop\n else\n pata << \"1:[#{pats}\"\n end\n if last_char == te and type == :literal and token == :literal\n pata << text\n pats = \"\"\n next\n end\n end\n end\n else\n if (token == :alternation) || !patg.empty?\n patg << \"1:#{pats}\"\n else\n pata << \"1:#{pats}\"\n end\n end\n pats = \"\"\n end\n fixed_text = false\n case token\n when :open\n set = true\n pats += \"[\"\n when :close\n if type == :set\n set = false\n if pats[-1] == \"[\"\n pats.chop!\n else\n if set_negate\n pats+=\"%]*\"\n set_negate = false\n else\n pats += \"]\"\n end \n\n end\n elsif type == :group\n capture = false\n unless patg.empty?\n patg << pats if pats.to_s != \"\"\n pata << patg\n patg = []\n pats = \"\"\n end\n end\n when :negate\n if set and pats[-1] == '['\n pats+=\"%\"\n set_negate = true\n end\n when :capture\n capture = true if type == :group\n when :alternation\n if type == :meta\n if pats != \"\"\n patg << pats\n pats = \"\"\n elsif patg.empty?\n # for the case the first element was not added to patg and was on pata fex: (a+|b|c)\n patg << pata.pop\n end\n end\n when :range\n pats.chop! if options.include?('i')\n range = pats[-1]\n pats.chop!\n when :digit\n pats += \"n\"\n when :nondigit\n pats += \"*[%0123456789%]\"\n when :space\n pats += \"_\"\n when :nonspace\n pats += \"*[% %]\"\n when :word\n pats += \"Ln_\"\n when :nonword\n pats += \"$\"\n when :word_boundary\n pats += \"$\"\n when :dot\n pats += \"*\"\n when :literal\n if range == \"\"\n if text.size > 1\n fixed_text = true\n if !patg.empty?\n patg << text.chop\n else\n pata << text.chop\n end\n pats = text[-1]\n else\n pats += text\n pats += text.upcase if options.include?('i')\n end\n else\n range = range + \"-\" + text\n if range == \"a-z\"\n if options.include?('i')\n pats = \"L\" + pats\n else\n pats = \"x\" + pats\n end\n elsif range == \"A-Z\"\n if options.include?('i')\n pats = \"L\" + pats\n else\n pats = \"X\" + pats\n end\n elsif range == \"0-9\"\n pats = \"n\" + pats\n else\n if set\n pats += (range[0]..range[2]).to_a.join\n if options.include?('i')\n pats += (range[0]..range[2]).to_a.join.upcase\n end\n else\n trange = (range[0]..range[2]).to_a.join\n if options.include?('i')\n trange += trange.upcase\n end\n pats += \"[\" + trange + \"]\"\n end\n end\n range = \"\"\n end\n pats = \"[\" + pats + \"]\" unless set\n when :interval\n size = text.sub(\",\", \"-\").sub(\"{\", \"\").sub(\"}\", \"\")\n size+=(default_infinite+size.chop.to_i).to_s if size[-1] == \"-\"\n pats = size + \":\" + pats\n if !patg.empty?\n patg << pats\n else\n pata << pats\n end\n pats = \"\"\n when :zero_or_one\n pats = \"0-1:\" + pats\n if !patg.empty?\n patg << pats\n else\n pata << pats\n end\n pats = \"\"\n when :zero_or_more\n pats = \"0-#{default_infinite}:\" + pats\n if !patg.empty?\n patg << pats\n else\n pata << pats\n end\n pats = \"\"\n when :one_or_more\n pats = \"1-#{default_infinite}:\" + pats\n if !patg.empty?\n patg << pats\n else\n pata << pats\n end\n pats = \"\"\n end\n end\n if pats != \"\"\n if pata.empty?\n if pats[0] == \"[\" and pats[-1] == \"]\" #fex: /[12ab]/\n pata = [\"1:#{pats}\"]\n end\n else\n pata[-1] += pats[1] #fex: /allo/\n end\n end\n if pata.size == 1 and pata[0].kind_of?(String)\n res = pata[0]\n else\n res = pata\n end\n StringPattern.cache[regexp_s] = res\n return res\n end", "def match? 
re, str\n re.match(str) != nil\n end", "def pattern2regex(pattern); end", "def get_regex(pattern, encoding='ASCII', options=0)\n Regexp.new(pattern.encode(encoding),options)\nend", "def regex(pattern)\n Regexp.new pattern.regex\n end", "def try_regexp( str, re )\n\tif str =~ re\n\t\tputs \" #$PREMATCH\",\n\t\t \" \" + colorize( 'bold', 'green' ) { $MATCH },\n\t\t \" #$POSTMATCH\"\n\telse\n\t\tputs colorize( \"Nope.\", 'red' )\n\tend\nend", "def match_string_to_regexp(str)\n #str = str.split(/(\\(\\(.*?\\)\\))(?!\\))/).map{ |x|\n # x =~ /\\A\\(\\((.*)\\)\\)\\Z/ ? $1 : Regexp.escape(x)\n #}.join\n #str = str.gsub(/\\\\\\s+/, '\\s+')\n #Regexp.new(str, Regexp::IGNORECASE)\n\n #str = str.split(/([#$]\\(.*?\\))/).map{ |x|\n # x =~ /\\A[#$]\\((.*)\\)\\Z/ ? ($1.start_with?('#') ? \"(#{$1})\" : $1 ) : Regexp.escape(x)\n #}.join\n #str = str.gsub(/\\\\\\s+/, '\\s+')\n #Regexp.new(str, Regexp::IGNORECASE)\n\n$stderr.puts \"HERE!!!!!!\"\n\n str = str.split(PATTERN).map{ |x|\n case x\n when /\\A\\(\\((.*)\\)\\)\\Z/\n $1\n when /\\A[#$]\\((.*)\\)\\Z/\n $1.start_with?('#') ? \"(#{$1})\" : $1\n else\n Regexp.escape(x)\n end\n }.join\n\n str = str.gsub(/\\\\\\s+/, '\\s+')\n\n Regexp.new(str, Regexp::IGNORECASE)\n\n #rexps = []\n #str = str.gsub(/\\(\\((.*?)\\)\\)/) do |m|\n # rexps << '(' + $1 + ')'\n # \"\\0\"\n #end\n #str = Regexp.escape(str)\n #rexps.each do |r|\n # str = str.sub(\"\\0\", r)\n #end\n #str = str.gsub(/(\\\\\\ )+/, '\\s+')\n #Regexp.new(str, Regexp::IGNORECASE)\n end", "def match(rex, default=nil)\n m = rex.match self.stdout\n if m\n if m.length > 2\n m[1..-1]\n else\n m[1]\n end\n else\n default\n end\n end", "def next_regex str\n first = nil\n ## content can be string or Chunkline, so we had to write <tt>index</tt> for this.\n ## =~ does not give an error, but it does not work.\n @list.each_with_index do |line, ix|\n col = line =~ /#{str}/\n if col\n first ||= [ ix, col ]\n if ix > @current_index\n return [ix, col]\n end\n end\n end\n return first\n end", "def regex\n @regex ||= (\n if template\n Templates.const_get(template.upcase)\n else\n case pattern\n when Regexp\n pattern\n when String\n flags = 0\n flags + Regexp::MULTILINE if multiline\n flags + Regexp::IGNORECASE if insensitive\n if escape\n Regexp.new(Regexp.escape(pattern), flags)\n else\n pat = substitute_templates(pattern)\n Regexp.new(pat, flags)\n end\n end\n end\n )\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def match(regexp)\n return regexp.match(pickle_format)\n end", "def convert_regexp_handling target\n return target.text if in_tt?\n\n handled = false\n\n @attributes.each_name_of target.type do |name|\n method_name = \"handle_regexp_#{name}\"\n\n if respond_to? 
method_name then\n target.text = public_send method_name, target\n handled = true\n end\n end\n\n unless handled then\n target_name = @attributes.as_string target.type\n\n raise RDoc::Error, \"Unhandled regexp handling #{target_name}: #{target}\"\n end\n\n target.text\n end", "def start_re=(_); end", "def literal?(node); end", "def quote_regexp(value)\n quote_string(value.source)\n end", "def on_regexp_beg(token)\n log \"REGEXP_BEG: '#{token}'\"\n super(token)\n end", "def string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 42)\n\n type = STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 486:4: '\\\\'' LITERAL_CHAR ( LITERAL_CHAR )* '\\\\''\n match(?\\')\n literal_char!\n # at line 486:22: ( LITERAL_CHAR )*\n loop do #loop 5\n alt_5 = 2\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, 0xFFFF)) \n alt_5 = 1\n\n end\n case alt_5\n when 1\n # at line 486:22: LITERAL_CHAR\n literal_char!\n\n else\n break #loop 5\n end\n end\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 42)\n\n end", "def test_match \n begin\n md = @regexp =~ @s\n puts \"\\n#{regexp} =~ #{@s} yields #{@regexp =~ @s} and $~=#{$~.inspect}\"\n\n rescue => e\n $stderr.print e.message \n $stderr.print e.backtrace.join(\"\\n\")\n raise #re-raise\n end \n end", "def match(str = T.unsafe(nil)); end", "def match(str = T.unsafe(nil)); end", "def to_regex\n\t\treturn Regexp.new(\"\") if blank?\n\n\t\t# Building the regex takes O(word_length) time, so I always cache the result \n\t\t# of this method in a variable when using it.\n\t\tx_start, y_start = @start_pos\n\t\tx_end, y_end = @end_pos\n\t\tregex_str = \"\"\n\n\t\tx_start.upto(x_end) do |x_i|\n\t\t\ty_start.upto(y_end) do |y_i|\n\t\t\t\tletter = @letter_positions[[x_i, y_i]]\n\t\t\t\tif letter\n\t\t\t\t\tregex_str += \"[#{letter}]\"\n\t\t\t\telse\n\t\t\t\t\tregex_str += \"[A-Z]\"\n\t\t\t\tend\n\t\t\tend\n\t\tend\n\n\t\tRegexp.new(regex_str)\n\tend", "def set_regex(param)\n case param\n when String\n if md = param.match(/^\\/(.*)\\/([imx]*)$/)\n @reg_exp = eval(param)\n @reg_string = @reg_exp.source\n else\n new_param = check_builtin(param)\n @reg_string = new_param\n @reg_exp = /#{@reg_string}/\n end\n when Regexp\n @reg_exp = param\n @@parse_options[:reg_options].set(@reg_exp.options) # inner regex options have priorty\n @reg_string = @@parse_options[:reg_options].prefix_reg + @reg_exp.source\n else\n raise \"Error: string or regular expression required\"\n end\n @@parse_options[:reg_source] = @reg_string\n end", "def regexp?(elora, die=nil)\n die = @proofsheet.die(elora.inherited_namespace, elora.absolute_xpath) if not die\n validity = true # default /.*/ so default valid is true\n if die\n if die.regexp\n if not md = die.regexp.match(content(elora))\n validity = false\n error = \"REGEXP '#{content(elora)}' /#{die.regexp.source}/\"\n @errors << [die.namespace, die.xpath, error]\n end\n end\n end\n return validity\n end", "def match(pattern); end", "def show_regexp(a, re)\n if a =~ re\n \"#{$`}<<#{$&}>>#{$'}\"\n else\n \"no match\"\n end\nend", "def show_regexp(a, re)\n if a =~ re\n \"#{$`}<<#{$&}>>#{$'}\"\n else\n \"no match\"\n end\nend", "def show_regexp(a, re)\n if a =~ re\n \"#{$`}<<#{$&}>>#{$'}\"\n else\n \"no match\"\n end\nend", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # 
trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def token(name,string)\n regexp = @regexp_parser.parse(@regexp_scanner.scan(string))\n regexp.name = name\n @scanner.add_regexp(regexp)\n self\n end", "def [](name)\n\t\t\t\tregex = name\n\t\t\t\tif(!regex.kind_of?(Regexp))\n\t\t\t\t\tregex = Regexp.new(Regexp.escape(name))\n\t\t\t\tend\n\t\t\t\treturn has_process?(regex)\t\t\t\t\n\t\t\tend", "def forward_regex regex\n if regex.is_a? 
Symbol\n regex = @text_patterns[regex]\n raise \"Pattern specified #{regex} does not exist in text_patterns \" unless regex\n end\n $multiplier = 1 if !$multiplier || $multiplier == 0\n line = @current_index\n _arr = _getarray\n buff = _arr[line].to_s\n return unless buff\n pos = @curpos || 0 # list does not have curpos\n $multiplier.times {\n found = buff.index(regex, pos)\n if !found\n # if not found, we've lost a counter\n if line+1 < _arr.length\n line += 1\n else\n return\n end\n pos = 0\n else\n pos = found + 1\n end\n $log.debug \" forward_word: pos #{pos} line #{line} buff: #{buff}\"\n }\n $multiplier = 0\n @current_index = line\n @curpos = pos\n ensure_visible\n @repaint_required = true\n end", "def regexp_enumerable(record)\n # Assume the parent's fragment text was passed in.\n parent = (record.parent.frgtxt || \"\")\n # copy the scrape expression into a simple variable so I can study it.\n s = record.scrapeexpr\n # Use a regexp to parse s\n # Try to match something like this:\n # s = '/^matchme$/'\n rg1 = /^\\/(.*?)\\/$/\n m = rg1.match(s).to_a\n # I need to untangle the parsing-regexp from the user's regexp\n if m.size == 2\n usr_regexp_s = m[1]\n else\n return 'Format problem with your regexp. Try something like this: /^(abc)(123)$/'\n end\n\n # Now work with the user's regexp and parent fragment\n usr_regexp = Regexp.new(m[1])\n usr_match = usr_regexp.match(parent).to_a\n return \"Your Regular Expression Matches Nothing In The Parent Fragment\" if (usr_match.size == 0)\n # I should now have a match but it's in an array.\n # Give it back to the user in a form he can use.\n # I guess a simple set of div-tags is friendly.\n usr_match_s = \"\"\n d = -1\n usr_match.each do |e|\n d = d + 1\n usr_match_s << \"<div class='regexp-enum-divwrap'>#{d.to_s}<p />#{e}</div>\"\n end # usr_match.each\n return usr_match_s\n end", "def convert_regexp_handling target\n convert_string super\n end", "def regexp_with_working_captures?(node)\n case node.type\n when :match_with_lvasgn\n lhs, _rhs = *node\n node.loc.selector.source == '=~' && regexp_with_named_captures?(lhs)\n when :send\n lhs, method, rhs = *node\n method == :match && [lhs, rhs].any? { |n| regexp_with_named_captures?(n) }\n end\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def test_extended_patterns_no_flags\n [\n [ \".*\", \"abcd\\nefg\", \"abcd\" ],\n [ \"^a.\", \"abcd\\naefg\", \"ab\" ],\n [ \"^a.\", \"bacd\\naefg\", \"ae\" ],\n [ \".$\", \"bacd\\naefg\", \"d\" ]\n ].each do |reg, str, result|\n m = RustRegexp.new(reg).match(str)\n puts m.inspect\n unless m.nil?\n assert_equal result, m[0]\n end\n end\n end", "def show_regexp(a, re) \n if a =~ re \n \"#{$`}<<#{$&}>>#{$'}\" \n else \n \"no match - #{a} - #{re}\" \n end \nend", "def matches?(regexp)\n raise \"Argument is not a regexp object\" unless regexp.instance_of?(Regexp)\n !(self.text =~ regexp).nil?\n end", "def match_with_eol_regex\n return match_regex if resource[:exact]\n return /^(?>#{match_regex})#{$/}/\n end", "def =~(p0) end", "def =~(p0) end" ]
[ "0.63690096", "0.62792087", "0.6277775", "0.61385995", "0.6078573", "0.6020639", "0.5961838", "0.5928929", "0.5928929", "0.588866", "0.5882437", "0.5882437", "0.5882437", "0.58812636", "0.58812636", "0.58812636", "0.5800408", "0.572743", "0.5661076", "0.5660501", "0.5659312", "0.56170803", "0.5519793", "0.54824924", "0.5465277", "0.5463054", "0.54542464", "0.5419647", "0.5406326", "0.5401869", "0.5399607", "0.536766", "0.5366191", "0.53555715", "0.5348484", "0.5348418", "0.53132254", "0.52999777", "0.5292561", "0.52474666", "0.5223057", "0.5186953", "0.5171762", "0.5165503", "0.5158698", "0.5138658", "0.513779", "0.5132954", "0.5120494", "0.51192266", "0.51088303", "0.51064765", "0.51037264", "0.5094174", "0.5078616", "0.5051994", "0.5016744", "0.5008434", "0.49974242", "0.49962825", "0.49960712", "0.49888727", "0.49828213", "0.49730465", "0.4970988", "0.49667567", "0.49650082", "0.49628288", "0.49609175", "0.49590915", "0.49177137", "0.4916258", "0.49137372", "0.48987457", "0.48885044", "0.4883851", "0.48753935", "0.48747942", "0.48747942", "0.48739314", "0.48732516", "0.486848", "0.48519614", "0.48502982", "0.48502982", "0.48502982", "0.4844097", "0.48417467", "0.48369431", "0.48361245", "0.4831375", "0.48291567", "0.48272875", "0.4823278", "0.48228687", "0.48105276", "0.48051405", "0.479834", "0.47852504", "0.47852504" ]
0.75792164
0
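Each record above follows the same layout: a query string, the positive document, a metadata hash, a list of hard negatives, their retrieval scores (as strings), the positive document's score, and its rank. A quick consistency check over one such record might look like the sketch below — validate_record, its rules, and the field names it fetches are assumptions for illustration, not part of any dataset tooling.

# Hypothetical sanity check for one record of this dump. Field names are
# assumed from the row layout seen here; the check logic is illustrative.
def validate_record(record)
  negatives = record.fetch("negatives")
  scores    = record.fetch("negative_scores").map(&:to_f)

  # Each hard negative should carry exactly one retrieval score.
  raise "negatives/scores length mismatch" unless negatives.length == scores.length

  # Scores are stored as strings; once parsed they should be valid similarities.
  raise "score out of range" unless scores.all? { |s| (0.0..1.0).cover?(s) }

  # The rank indexes the positive document among the scored candidates.
  rank = Integer(record.fetch("document_rank").to_s)
  raise "rank out of range" unless (0..negatives.length).cover?(rank)

  true
end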
Tests whether the next literal is a NumericLiteral. If it is, returns an ECMA262::ECMA262Numeric object and advances the lexical parser position. Otherwise returns nil and leaves the position unchanged.
def numeric_literal
  hex_integer_literal || octal_integer_literal || decimal_literal
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decimal_literal\n pos0 = @pos\n code = @codes[@pos]\n\n if code.nil?\n return nil\n elsif code == 0x2e #.\n @pos += 1\n f = decimal_digits\n if f.nil? #=> this period is punctuator\n @pos = pos0 + 1\n return ECMA262::PUNC_PERIOD\n end\n if (code = @codes[@pos]) == 0x65 || code == 0x45\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new('0', f, e)\n elsif code == 0x30 # zero\n i = \"0\"\n @pos += 1\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n elsif code >= 0x31 and code <= 0x39\n i = decimal_digits\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n end\n\n nil\n end", "def parse_numeric_constant\n if peek?(:LIT_INT)\n ExprInt.new(expect(:LIT_INT))\n else\n ExprFloat.new(expect(:LIT_FLOAT))\n end\n end", "def read_number(token)\n current = @marker.character\n is_float = current == ?.\n is_exponent = false\n token.kind = is_float ? :float_lit : :integer_lit\n\n while (current = peek_next())\n case current\n # Float lit\n when ?.\n break if is_float == true\n is_float = true\n token.kind = :float_lit\n read_next()\n\n # Digit\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_next()\n\n # Exponent\n when ?e, ?E\n if is_exponent\n token.kind = :invalid\n raise_error(:duplicate_exponent,\n \"Malformed number literal: exponent already provided\")\n end\n\n is_exponent = true\n token.kind = is_float ? :float_exp_lit : :integer_exp_lit\n\n read_next()\n current = read_next()\n current = read_next() if current == ?- || current == ?+\n\n if current < ?0 || current > ?9\n raise_error(:malformed_exponent, \"Malformed number literal: exponent expected but not provided\")\n end\n\n else break\n end\n end\n\n token.value = @source[(token.from .. 
@marker.source_index)]\n end", "def octal_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37\n @pos += 1\n pos0 = @pos\n while code = @codes[@pos] and code >= 0x30 and code <= 0x37\n @pos += 1\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(8))\n end\n else\n nil\n end\n end", "def number\n result = ''\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n\n if @current_char == '.'\n result << @current_char\n advance\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n Token.new(:real_const, result.to_f)\n else\n Token.new(:integer_const, result.to_i)\n end\n end", "def consume_numeric\n number = consume_number\n repr = number[0]\n value = number[1]\n type = number[2]\n\n if type == :integer\n value = value.to_i\n else\n value = value.to_f\n end\n\n if start_identifier?(@s.peek(3))\n create_token(:dimension,\n :repr => repr,\n :type => type,\n :unit => consume_name,\n :value => value)\n\n elsif @s.peek == '%'\n @s.consume\n\n create_token(:percentage,\n :repr => repr,\n :type => type,\n :value => value)\n\n else\n create_token(:number,\n :repr => repr,\n :type => type,\n :value => value)\n end\n end", "def is_number? token\n Float(token) && true rescue false\n end", "def number\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 1 )\n value = nil\n __DEC_NUMBER1__ = nil\n __HEX_NUMBER2__ = nil\n\n begin\n # at line 22:2: ( DEC_NUMBER | HEX_NUMBER )\n alt_1 = 2\n look_1_0 = @input.peek( 1 )\n\n if ( look_1_0 == DEC_NUMBER )\n alt_1 = 1\n elsif ( look_1_0 == HEX_NUMBER )\n alt_1 = 2\n else\n raise NoViableAlternative( \"\", 1, 0 )\n end\n case alt_1\n when 1\n # at line 22:4: DEC_NUMBER\n __DEC_NUMBER1__ = match( DEC_NUMBER, TOKENS_FOLLOWING_DEC_NUMBER_IN_number_180 )\n # --> action\n value = __DEC_NUMBER1__.text.to_i \n # <-- action\n\n when 2\n # at line 23:4: HEX_NUMBER\n __HEX_NUMBER2__ = match( HEX_NUMBER, TOKENS_FOLLOWING_HEX_NUMBER_IN_number_187 )\n # --> action\n value = __HEX_NUMBER2__.text[2..-1].to_i(16) \n # <-- action\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 1 )\n\n end\n \n return value\n end", "def number\n token = match(:T_INT, :T_FLOAT)\n Number.new(token.value)\n end", "def hex_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n #0x / 0X\n elsif code == 0x30 and (@codes[@pos+1] == 0x78 || @codes[@pos+1] == 0x58)\n @pos += 2\n pos0 = @pos\n while code = @codes[@pos] and hex_digit?(code)\n @pos += 1;\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(16))\n end\n else\n nil\n end\n end", "def consume_numeric; end", "def isNumber(text)\r\n @@log.debug(\"XmlRuleVisitor::isNumber\")\r\n @@log.debug(text.inspect)\r\n\r\n if (nil != text.index('.'))\r\n num = text.to_f\r\n if (num.to_s == text)\r\n @@log.debug(\"isNumber: float: #{num.to_s}\")\r\n return num\r\n end # if num\r\n end # if 
nil\r\n\r\n num = text.to_i\r\n if (num.to_s == text)\r\n @@log.debug(\"isNumber: int: #{num.to_s}\")\r\n return num\r\n end # if num\r\n\r\n return nil\r\n\r\n end", "def number_from_text(text)\n return nil unless starts_with_numeric_text?(text)\n text.to_i\n end", "def is_numeric(o)\n true if Integer(o) rescue false \n end", "def is_numeric(o)\n true if Integer(o) rescue false \n end", "def is_numeric?(obj)\n obj.to_s.match(/\\A[+-]?\\d+?(\\.\\d+)?\\Z/) == nil ? false : true\n end", "def consume_numeric\n number = consume_number\n\n if start_identifier?\n create_token(:dimension,\n :repr => number[0],\n :type => number[2],\n :unit => consume_name,\n :value => number[1])\n\n elsif @s.peek == '%'\n @s.consume\n\n create_token(:percentage,\n :repr => number[0],\n :type => number[2],\n :value => number[1])\n\n else\n create_token(:number,\n :repr => number[0],\n :type => number[2],\n :value => number[1])\n end\n end", "def get_num\n la = $lookahead\n\n return expected(\"Integer\") unless is_digit(la)\n\n lookahead\n\n la\nend", "def parse_number\n self.lex_state = :expr_end\n\n case\n when src.scan(/[+-]?0[xXbBdD]\\b/) then\n rb_compile_error \"Invalid numeric format\"\n when src.scan(/[+-]?(?:(?:[1-9][\\d_]*|0)(?!\\.\\d)\\b|0[Dd][0-9_]+)/) then\n int_with_base(10)\n when src.scan(/[+-]?0x[a-f0-9_]+/i) then\n int_with_base(16)\n when src.scan(/[+-]?0[Bb][01_]+/) then\n int_with_base(2)\n when src.scan(/[+-]?0[Oo]?[0-7_]*[89]/) then\n rb_compile_error \"Illegal octal digit.\"\n when src.scan(/[+-]?0[Oo]?[0-7_]+|0[Oo]/) then\n int_with_base(8)\n when src.scan(/[+-]?[\\d_]+_(e|\\.)/) then\n rb_compile_error \"Trailing '_' in number.\"\n when src.scan(/[+-]?[\\d_]+\\.[\\d_]+(e[+-]?[\\d_]+)?\\b|[+-]?[\\d_]+e[+-]?[\\d_]+\\b/i) then\n number = src.matched\n if number =~ /__/ then\n rb_compile_error \"Invalid numeric format\"\n end\n self.yacc_value = number.to_f\n :tFLOAT\n when src.scan(/[+-]?[0-9_]+(?![e])/) then\n int_with_base(10)\n else\n rb_compile_error \"Bad number format\"\n end\n end", "def start_number?(text = T.unsafe(nil)); end", "def number_token\n return unless match = @chunk.match(NUMBER)\n number = match[0]\n lexed_length = number.size\n token(:NUMBER, number, 0, lexed_length)\n lexed_length\n end", "def number(digits: T.unsafe(nil)); end", "def write_num_lit(data)\n write_num_base('c:numLit', data)\n end", "def on_numeric(n)\n n\n end", "def on_numeric(n)\n n\n end", "def special_number?(literal)\n literal.is_a?(Sass::Script::Value::String) && literal.value =~ /(calc|var)\\(/\n end", "def numerify(number_string, leading_zero: T.unsafe(nil)); end", "def to_numeric_or_nil\n self == 0 ? nil : self\n end", "def is_numeric(val)\n\t\treturn Integer(val).is_a? 
Integer rescue false\n\tend", "def num\n if %i[FLOAT HEX OCT BIN ASCII UNI SCI].include?(peek.type)\n Apparat::Byte::Number.new(peek.type, peek.value, peek.line, consume.column)\n else\n false\n end\n end", "def number_token\n return nil unless md = NUMBER.match(@chunk)\n number = md.to_a[0]\n token :Number, number\n number.length\n end", "def num\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n return_value = NumReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n num_start_index = @input.index\n\n success = false # flag used for memoization\n\n begin\n # rule memoization\n if @state.backtracking > 0 and already_parsed_rule?( __method__ )\n success = true\n return return_value\n end\n # at line 345:22: ( DIGIT )+\n # at file 345:22: ( DIGIT )+\n match_count_39 = 0\n while true\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == DIGIT )\n alt_39 = 1\n\n end\n case alt_39\n when 1\n # at line 0:0: DIGIT\n match( DIGIT, TOKENS_FOLLOWING_DIGIT_IN_num_2214 )\n\n else\n match_count_39 > 0 and break\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n eee = EarlyExit(39)\n\n\n raise eee\n end\n match_count_39 += 1\n end\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n return_value.value = @input.to_s( return_value.start, @input.look( -1 ) ).to_i\n # <-- action\n end\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n success = true\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n memoize( __method__, num_start_index, success ) if @state.backtracking > 0\n\n end\n \n return return_value\n end", "def numeric?(obj)\n obj.to_s.match(/\\A[+-]?\\d+?(\\.\\d+)?\\Z/).nil? ? false : true\n end", "def number\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 29 )\n return_value = NumberReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n begin\n # at line 126:12: ( digits ( DOT digits )? )\n # at line 126:12: ( digits ( DOT digits )? )\n # at line 126:14: digits ( DOT digits )?\n @state.following.push( TOKENS_FOLLOWING_digits_IN_number_1255 )\n digits\n @state.following.pop\n # at line 126:21: ( DOT digits )?\n alt_44 = 2\n look_44_0 = @input.peek( 1 )\n\n if ( look_44_0 == DOT )\n alt_44 = 1\n end\n case alt_44\n when 1\n # at line 126:23: DOT digits\n match( DOT, TOKENS_FOLLOWING_DOT_IN_number_1259 )\n @state.following.push( TOKENS_FOLLOWING_digits_IN_number_1261 )\n digits\n @state.following.pop\n\n end\n\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 29 )\n\n end\n \n return return_value\n end", "def tokenize_float_literal\n advance # Pass the .\n\n until( /[0-9eE]/.match( cchar ).nil? 
)\n if cchar == 'e' || cchar == 'E'\n return tokenize_exponent_literal\n end\n advance\n end\n capture_token( :float_literal )\n end", "def isNum(c)\r\n\tInteger(c) rescue return false\r\n\treturn true\r\nend", "def assert_numeric(value, pos)\n if value =~ /^0[xX]/\n lex_error(Issues::INVALID_HEX_NUMBER, {:value => value}, pos) unless value =~ /^0[xX][0-9A-Fa-f]+$/\n\n elsif value =~ /^0[^.]/\n lex_error(Issues::INVALID_OCTAL_NUMBER, {:value => value}, pos) unless value =~ /^0[0-7]+$/\n\n elsif value =~ /^\\d+[eE.]/\n lex_error(Issues::INVALID_DECIMAL_NUMBER, {:value => value}, pos) unless value =~ /^\\d+(?:\\.\\d+)?(?:[eE]-?\\d+)?$/\n\n else\n lex_error(Issues::ILLEGAL_NUMBER, {:value => value}, pos) unless value =~ /^\\d+$/\n end\n end", "def IS_NUMBER(value)\n value.first.is_a?(Numeric)\n end", "def number?\n lookahead?([:T_INT, :T_FLOAT])\n end", "def to_number_or_nil(value)\n # case/when copied from Puppet::Parser::Scope::number?\n case value\n when /^-?\\d+(:?\\.\\d+|(:?\\.\\d+)?e\\d+)$/\n value.to_f\n when /^0x[0-9a-f]+$/i\n value.to_i(16)\n when /^0[0-7]+$/\n value.to_i(8)\n when /^-?\\d+$/\n value.to_i\n else\n nil\n end\n end", "def tokenize_number(&block) # :yields: SQLTree::Token::Number\n number = current_char\n dot_encountered = false\n while /\\d/ =~ peek_char || (peek_char == '.' && !dot_encountered)\n dot_encountered = true if peek_char == '.'\n number << next_char\n end\n\n if dot_encountered\n handle_token(SQLTree::Token::Number.new(number.to_f), &block)\n else\n handle_token(SQLTree::Token::Number.new(number.to_i), &block)\n end\n end", "def numeric?\n type == \"NUMERIC\"\n end", "def number\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 32 )\n return_value = NumberReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n begin\n # at line 154:12: ( ( DIGIT )* ( '.' ( DIGIT )+ )? )\n # at line 154:12: ( ( DIGIT )* ( '.' ( DIGIT )+ )? )\n # at line 154:14: ( DIGIT )* ( '.' ( DIGIT )+ )?\n # at line 154:14: ( DIGIT )*\n while true # decision 40\n alt_40 = 2\n look_40_0 = @input.peek(1)\n\n if (look_40_0 == DIGIT)\n alt_40 = 1\n\n end\n case alt_40\n when 1\n # at line 154:14: DIGIT\n match(DIGIT, TOKENS_FOLLOWING_DIGIT_IN_number_1205)\n\n else\n break # out of loop for decision 40\n end\n end # loop for decision 40\n # at line 154:21: ( '.' ( DIGIT )+ )?\n alt_42 = 2\n look_42_0 = @input.peek(1)\n\n if (look_42_0 == T__33)\n alt_42 = 1\n end\n case alt_42\n when 1\n # at line 154:23: '.' 
( DIGIT )+\n match(T__33, TOKENS_FOLLOWING_T__33_IN_number_1210)\n # at file 154:27: ( DIGIT )+\n match_count_41 = 0\n while true\n alt_41 = 2\n look_41_0 = @input.peek(1)\n\n if (look_41_0 == DIGIT)\n alt_41 = 1\n\n end\n case alt_41\n when 1\n # at line 154:27: DIGIT\n match(DIGIT, TOKENS_FOLLOWING_DIGIT_IN_number_1212)\n\n else\n match_count_41 > 0 and break\n eee = EarlyExit(41)\n\n\n raise eee\n end\n match_count_41 += 1\n end\n\n\n end\n\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look(-1)\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 32 )\n\n end\n\n return return_value\n end", "def numeric?\n Integer(self) != nil rescue false\n end", "def _is_numeric?(str)\n Float(str) != nil rescue false\n end", "def to_i\n case @kind\n when :integer_lit, :integer_exp_lit, :single_string_lit, :double_string_lit\n @value.to_i\n when :float_lit, :float_exp_lit\n @value.to_f.to_i\n when :hex_lit\n @value.to_i(16)\n when :bin_lit\n @value.to_i(2)\n else\n raise TypeError, \"Cannot convert this token to an integer\"\n end\n end", "def number_or_nil(string)\n\t Integer(string)\n\t\trescue ArgumentError\n\t \tnil\n\tend", "def cast_to_num(str)\n Integer(str)\n rescue ArgumentError\n Float(str)\n rescue ArgumentError\n nil\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def numeric?\n !!(self =~ /\\A[0-9]+\\.*[0-9]*\\z/)\n end", "def def_numeric sym, name=nil, &block\n def_checker(sym) { |v|\n next true if v == nil\n return false unless v.is_a? Numeric\n if block_given?\n next false unless block.call v\n end\n true\n }\n def_alias sym, name if name\n end", "def def_numeric sym, name=nil, &block\n def_checker(sym) { |v|\n next true if v == nil\n return false unless v.is_a? Numeric\n if block_given?\n next false unless block.call v\n end\n true\n }\n def_alias sym, name if name\n end", "def is_number?(tok)\n #check number format: correct types of digits\n if tok[0] == 36 # $\n return nil if( (tok.sub(\"$\",\"\") =~ /[^A-Fa-f0-9]/) != nil)\n elsif tok[0] == 67 # C\n return nil if ( (tok.sub(\"C\",\"\") =~ /[^0-7]/) != nil)\n elsif tok[0] == 66 # B\n return nil if ( (tok.sub(\"B\",\"\") =~ /[^01]/) != nil) \n elsif tok[0] >= 48 and tok[0] <= 57\n return nil if ( (tok =~ /[^0-9]/) != nil) \n else\n #can raise exceptions here:\n return nil\n end\n \n return get_number_system(tok)\n end", "def can_be_numeric?\n type == :numeric\n end", "def is_numeric?(obj)\n obj.to_s.match(/\\A[+-]?\\d+?(\\.\\d+)?\\Z/) == nil ? 
false : true\nend", "def number\n @number ||= parse(read)\n end", "def test_numeric\n assert_equal(true, RpnCalc.new.numeric?('0'))\n assert_equal(true, RpnCalc.new.numeric?('0.0'))\n assert_equal(true, RpnCalc.new.numeric?('-0'))\n assert_equal(true, RpnCalc.new.numeric?('-0.0'))\n assert_equal(false, RpnCalc.new.numeric?('A'))\n assert_equal(false, RpnCalc.new.numeric?('-E'))\n assert_equal(false, RpnCalc.new.numeric?(nil))\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def tokenize_exponent_literal\n advance # Pass the e/E\n advance if cchar == '+' or cchar == '-'\n advance until( /[0-9]/.match( cchar ).nil? )\n capture_token( :exp_literal )\n end", "def is_numeric?\n begin Float(self)\n true \n end\n rescue\n false\n end", "def is_numeric?(val)\n Float val rescue false\n end", "def number!\r\n # -> uncomment the next line to manually enable rule tracing\r\n # trace_in( __method__, 2 )\r\n\r\n type = NUMBER\r\n channel = ANTLR3::DEFAULT_CHANNEL\r\n\r\n \r\n # - - - - main rule block - - - -\r\n # at line 11:10: ( '0' .. '9' )+\r\n # at file 11:10: ( '0' .. '9' )+\r\n match_count_1 = 0\r\n while true\r\n alt_1 = 2\r\n look_1_0 = @input.peek( 1 )\r\n\r\n if ( look_1_0.between?( 0x30, 0x39 ) )\r\n alt_1 = 1\r\n\r\n end\r\n case alt_1\r\n when 1\r\n # at line 11:11: '0' .. '9'\r\n match_range( 0x30, 0x39 )\r\n\r\n else\r\n match_count_1 > 0 and break\r\n eee = EarlyExit(1)\r\n\r\n\r\n raise eee\r\n end\r\n match_count_1 += 1\r\n end\r\n\r\n\r\n \r\n @state.type = type\r\n @state.channel = channel\r\n\r\n ensure\r\n # -> uncomment the next line to manually enable rule tracing\r\n # trace_out( __method__, 2 )\r\n\r\n end", "def is_numeric(str)\n Float(str) != nil rescue false\n end", "def is_numeric?(obj)\n if /[^0-9]/.match(obj) != nil\n return true\n end\n false\nend", "def isNumeric(s)\n\t\tbegin\n\t\t\tFloat(s)\n\t\trescue\n\t\t\tfalse # not numeric\n\t\telse\n\t\t\ttrue # numeric\n\t\tend\n\tend", "def consume_number\n repr = String.new\n type = :integer\n\n repr << @s.consume if @s.peek =~ RE_NUMBER_SIGN\n repr << (@s.scan(RE_DIGIT) || '')\n\n if match = @s.scan(RE_NUMBER_DECIMAL)\n repr << match\n type = :number\n end\n\n if match = @s.scan(RE_NUMBER_EXPONENT)\n repr << match\n type = :number\n end\n\n [repr, convert_string_to_number(repr), type]\n end", "def consume_number\n repr = ''\n type = :integer\n\n repr << @s.consume if @s.peek =~ RE_NUMBER_SIGN\n repr << (@s.scan(RE_DIGIT) || '')\n\n if match = @s.scan(RE_NUMBER_DECIMAL)\n repr << match\n type = :number\n end\n\n if match = @s.scan(RE_NUMBER_EXPONENT)\n repr << match\n type = :number\n end\n\n [repr, convert_string_to_number(repr), type]\n end", "def num\n begin\n Integer self\n rescue ArgumentError\n nil\n end\n end", "def is_numeric?(x)\n return false if x.is_a?(Time) # we want to treat times as dimensions\n return false if x.is_a?(Complex) # not all complex can be converted to float\n return true if x.is_a?(Numeric)\n true if Float(x) rescue false\n end", "def is_numeric?\n data_type == 'number'\n end", "def check_digit(number: T.unsafe(nil)); end", "def num!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 5 )\n\n type = NUM\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 37:9: ( '1' .. '9' ) ( '0' .. 
'9' )*\n # at line 37:9: ( '1' .. '9' )\n # at line 37:10: '1' .. '9'\n match_range( 0x31, 0x39 )\n\n # at line 37:19: ( '0' .. '9' )*\n while true # decision 1\n alt_1 = 2\n look_1_0 = @input.peek( 1 )\n\n if ( look_1_0.between?( 0x30, 0x39 ) )\n alt_1 = 1\n\n end\n case alt_1\n when 1\n # at line 37:20: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n else\n break # out of loop for decision 1\n end\n end # loop for decision 1\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 5 )\n\n end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def numeric?; float?; end", "def get_num\n value = \"\"\n\n return expected(\"Integer\") unless is_digit($lookahead)\n\n while is_digit($lookahead)\n value << $lookahead\n lookahead\n end\n\n skip_white\n\n value\nend", "def numeric?\n true\n end", "def is_numeric?(s)\n !!Float(s) rescue false\n end", "def is_numeric?\n self.data_type == 'number'\n end", "def numeric?\n val.numeric?\n end", "def is_numeric(str)\n true if Integer(str) rescue false\nend", "def get_num\n value = 0\n\n return expected(\"Integer\") unless is_digit($lookahead)\n\n while is_digit($lookahead)\n value = 10 * value + $lookahead.to_i\n lookahead\n end\n\n value\nend", "def cast_numeric(sql_type = nil)\n cast(sql_type || :integer).sql_number\n end", "def handle_float(float, lineno_column)\n Literal.new float.to_f\n end", "def cmd_numeric(obj)\n return handle_return_object(obj[:val].to_i)\n end", "def cast_numeric(arg, sql_type = nil)\n cast(arg, sql_type || Integer).sql_number\n end", "def numeric?(string)\n Float(string) != nil rescue false\n end", "def numeric(object)\n object.to_s =~ /(^[-+]?[0-9]+$)|(\\.0+)$/ ? object.to_i : Float(object)\n end", "def get_numeric_value(pattern)\n variable = self\n variable = yield variable.value(pattern) until variable.kind_of?(Numeric)\n variable\n end", "def handle_int(int, lineno_column)\n Literal.new int.to_i\n end", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. 
'9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def cast_numeric(sql_type = nil)\n Cast.new(self, sql_type || Integer).sql_number\n end", "def numeric?(object)\n \t\ttrue if Integer(object) rescue false\n \tend", "def is_numeric?(s)\n !!Float(s) rescue false\n end", "def k_num!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 47 )\n\n\n\n type = K_NUM\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 441:4: ( 'round' | 'aleatorio' | 'between' )\n # at line 441:4: ( 'round' | 'aleatorio' | 'between' )\n alt_14 = 3\n case look_14 = @input.peek( 1 )\n when 0x72 then alt_14 = 1\n when 0x61 then alt_14 = 2\n when 0x62 then alt_14 = 3\n else\n raise NoViableAlternative( \"\", 14, 0 )\n\n end\n case alt_14\n when 1\n # at line 441:5: 'round'\n match( \"round\" )\n\n\n when 2\n # at line 441:13: 'aleatorio'\n match( \"aleatorio\" )\n\n\n when 3\n # at line 441:25: 'between'\n match( \"between\" )\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 47 )\n\n\n end", "def start_number?(text = nil)\n text = @s.current + @s.peek(2) if text.nil?\n\n case text[0]\n when '+', '-'\n !!(text[1] =~ RE_DIGIT || (text[1] == '.' && text[2] =~ RE_DIGIT))\n\n when '.'\n !!(text[1] =~ RE_DIGIT)\n\n when RE_DIGIT\n true\n\n else\n false\n end\n end", "def number?(value)\n value.is_a?(Numeric)\n end", "def numeric(input)\n return Float(input) != nil rescue false\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def is_a_number?(s)\n \ts.to_s.match(/\\A[+-]?\\d+?(\\.\\d+)?\\Z/) == nil ? false : true\n\tend", "def interpret\r\n return @number.to_i\r\n end" ]
[ "0.69190603", "0.65402716", "0.6429054", "0.61556596", "0.6020203", "0.58928955", "0.5867733", "0.58218485", "0.5782044", "0.5770232", "0.57624775", "0.57278407", "0.5707754", "0.57007396", "0.56994337", "0.56495124", "0.56194323", "0.5616846", "0.55748755", "0.55400306", "0.5537935", "0.55161536", "0.5512791", "0.55066663", "0.55066663", "0.55060375", "0.5498962", "0.54907113", "0.5485621", "0.54817957", "0.5467144", "0.54599196", "0.545878", "0.5448807", "0.5436405", "0.5431641", "0.54045683", "0.5386888", "0.5372428", "0.5356441", "0.5355852", "0.5354345", "0.5352666", "0.53396666", "0.53390294", "0.53293806", "0.5327891", "0.5305501", "0.5301732", "0.530115", "0.5296378", "0.5296378", "0.5296221", "0.52874583", "0.5282586", "0.52818173", "0.5280662", "0.52718997", "0.5265512", "0.52575076", "0.52492696", "0.52461886", "0.52387905", "0.52292407", "0.5218816", "0.520186", "0.5201602", "0.51998997", "0.51725733", "0.51701874", "0.516006", "0.5154122", "0.5151139", "0.5146349", "0.5134312", "0.51308274", "0.5115564", "0.51142853", "0.511229", "0.5107924", "0.50691223", "0.5063603", "0.5059275", "0.5058951", "0.50452274", "0.50406027", "0.5036935", "0.50332165", "0.50218797", "0.5018506", "0.50092196", "0.5003893", "0.49907744", "0.49890468", "0.49832684", "0.49640644", "0.49555272", "0.49481523", "0.49353775", "0.492852" ]
0.5983177
5
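The positive document for this record chains its three alternatives with ||, which only works because of the contract stated in the query: each sub-lexer either consumes input and returns a value, or returns nil with the position restored. Below is a toy re-statement of that try-in-order / position-restore pattern; the ToyLexer class and its two sub-lexers are hypothetical, not the library's API.

# Toy illustration of the contract described in the query above: each
# alternative either consumes input and returns a value, or returns nil
# and leaves @pos untouched, so alternatives can be chained with ||.
class ToyLexer
  HEX_DIGITS = [*0x30..0x39, *0x41..0x46, *0x61..0x66]  # 0-9 A-F a-f

  def initialize(src)
    @codes = src.codepoints
    @pos = 0
  end

  def numeric_literal
    hex_literal || decimal_literal
  end

  private

  def hex_literal
    saved = @pos
    return nil unless @codes[@pos] == 0x30 && [0x78, 0x58].include?(@codes[@pos + 1])
    @pos += 2
    start = @pos
    @pos += 1 while HEX_DIGITS.include?(@codes[@pos])
    if @pos == start            # "0x" with no digits: restore position, no match
      @pos = saved
      return nil
    end
    @codes[start...@pos].pack("U*").to_i(16)
  end

  def decimal_literal
    start = @pos
    @pos += 1 while @codes[@pos] && (0x30..0x39).cover?(@codes[@pos])
    @pos == start ? nil : @codes[start...@pos].pack("U*").to_i(10)
  end
end

p ToyLexer.new("0x1F").numeric_literal  #=> 31
p ToyLexer.new("42").numeric_literal    #=> 42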
7.8.3 HexIntegerLiteral :: 0x HexDigit | 0X HexDigit | HexIntegerLiteral HexDigit
def hex_integer_literal
  code = @codes[@pos]
  if code.nil?
    return nil
  # 0x / 0X
  elsif code == 0x30 and (@codes[@pos+1] == 0x78 || @codes[@pos+1] == 0x58)
    @pos += 2
    pos0 = @pos
    while code = @codes[@pos] and hex_digit?(code)
      @pos += 1
    end
    if identifier_start?(code)
      raise ParseError.new("The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit", self)
    else
      return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack("U*").to_i(16))
    end
  else
    nil
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hex_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 68 )\n\n\n\n type = HexLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 523:14: '0' ( 'x' | 'X' ) ( HexDigit )+\n match( 0x30 )\n if @input.peek(1) == 0x58 || @input.peek(1) == 0x78\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n # at file 523:28: ( HexDigit )+\n match_count_19 = 0\n while true\n alt_19 = 2\n look_19_0 = @input.peek( 1 )\n\n if ( look_19_0.between?( 0x30, 0x39 ) || look_19_0.between?( 0x41, 0x46 ) || look_19_0.between?( 0x61, 0x66 ) )\n alt_19 = 1\n\n end\n case alt_19\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x46 ) || @input.peek( 1 ).between?( 0x61, 0x66 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_19 > 0 and break\n eee = EarlyExit(19)\n\n\n raise eee\n end\n match_count_19 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 68 )\n\n\n end", "def hex_code(string, background = T.unsafe(nil)); end", "def hex_code(string, background = T.unsafe(nil)); end", "def numeric_literal\n hex_integer_literal || octal_integer_literal || decimal_literal\n end", "def hexadecimal(digits: T.unsafe(nil)); end", "def hex() end", "def get_hex(x)\n val = x.to_i.to_s(16)\n val = \"0#{val}\" if val.length < 2\n val\n end", "def octal_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37\n @pos += 1\n pos0 = @pos\n while code = @codes[@pos] and code >= 0x30 and code <= 0x37\n @pos += 1\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(8))\n end\n else\n nil\n end\n end", "def hex_str\n \"##{hex_val}\"\n end", "def in_hex\n Bases.val(self).in_hex\n end", "def from_hex(str)\n str.to_s.hex\nend", "def hex2int(hex_string)\n hex_string.split(/([a-fA-F0-9][a-fA-F0-9])/i).reject(&:blank?).map {|h| h.to_i(16)}.map(&:chr).join\n end", "def hex(digits)\n string(HEX_DIGITS, digits)\n end", "def hex\n @id.unpack('H*').first\n end", "def hex_digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 6 )\n\n \n # - - - - main rule block - - - -\n # at line 291:13: ( '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' )\n if @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x46 ) || @input.peek( 1 ).between?( 0x61, 0x66 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 6 )\n\n end", "def read_hex_char\n p = get_pos(-2)\n c = readc\n if !Libc.isxdigit(c)\n raise \"#{p}: \\\\x is not followed by a hexadecimal character: #{c}\"\n # errorp(p, \"\\\\x is not followed by a hexadecimal character: %c\", c);\n end\n r = 0\n while true\n case c\n when '0' .. '9' then r = (r << 4) | (c.ord - '0'.ord)\n when 'a' .. 'f' then r = (r << 4) | (c.ord - 'a'.ord + 10)\n when 'A' .. 
'F' then r = (r << 4) | (c.ord - 'A'.ord + 10)\n else\n unreadc(c)\n return r\n end\n c = readc\n end\n end", "def x\n @hex.x\n end", "def hex(x)\n case x\n when Numeric\n \"0x%x\" % x\n when Array\n x.map{|y| hex(y)}\n when Hash\n x.keys.inject({}){|h, k| h[hex(k)] = hex(x[k]); h}\n end\nend", "def hex2int(input)\n hexnum = input.delete(\"#\")\n raise ArgumentError, \"Got #{input}. Hexadecimal number must have the form #FC0 or #FFCC00.\" unless (hexnum.length == 3 or hexnum.length == 6)\n (hexnum.length == 3) ? hexnum.map { |i| i + i }.to_s.hex : hexnum.hex\n end", "def xtest_int_literal_big\n check(C::IntLiteral, <<-EOS)\n |10000000000\n EOS\n end", "def hex(placeholder = nil)\n generate(placeholder, HEX_CHARACTERS)\n end", "def ascii_s(hex_s)\n [hex_s].pack('H*')\nend", "def hex?(c)\n return false if c.nil?\n c =~ /[a-fA-F0-9]/\n end", "def literal_handle(_buffer, string)\n case string\n when /^\\d{,3}$/\n return if string.size < 3\n [string.to_i].pack('U')\n when /^o([0-7]{,3})$/i\n return if Regexp.last_match(1).size < 3\n [Integer(\"0#{Regexp.last_match(1)}\")].pack('U')\n when /^x(\\h{,2})$/i\n return if Regexp.last_match(1).size < 2\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n when /^u(\\h{,4})$/\n return if Regexp.last_match(1).size < 4\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n when /^U(\\h{,8})$/\n return if Regexp.last_match(1).size < 8\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n end\n end", "def hex(num)\n num.to_s(16)\nend", "def hex\n to_i.to_s(16)\n end", "def from_hex(val)\n if val.is_a?(String)\n # Double up if single char form\n val *= 2 if val.size == 1\n # Convert to integer\n val = val.hex\n end\n # Clamp\n val = 0 if val < 0\n val = 255 if val > 255\n val\n end", "def hex; @node_id.hexlify; end", "def hex\n @hex ||= to_i.to_s(16)\n end", "def hex_val\n @hex_val\n end", "def hex(str)\n str.unpack('H*').first\n end", "def hex_to_integer(str)\n values = str.chars.map.with_index do |char, index|\n CHAR_TO_DIGIT[char] * (16 ** (index - 3).abs)\n end\n\n values.reduce(:+)\nend", "def to_hex(s); s && s.unpack('H*').first; end", "def hex(val, psign: false)\n return format(\"#{psign ? '+' : ''}0x%x\", val) if val >= 0\n\n format('-0x%x', -val)\n end", "def hex_color(args = T.unsafe(nil)); end", "def s_to_i(s)\n if s[0..1] == \"0x\"\n return s.to_i(16)\n else\n return s.to_i(10)\n end\nend", "def hex_s(ascii_s)\n ascii_s.unpack('H*')[0]\nend", "def read_hex(length=2)\n num = ''\n while peek_char.match(/[\\dA-Fa-f]/)\n num += next_char\n if num.length >= length\n break\n end\n end\n unless (1..length).include?(num.length)\n raise UnpickleException, \"Bad hex sequence in string\"\n end\n return num.to_i(16)\n end", "def to_hex\n to_octet.unpack('H*').first\n end", "def hexadecimal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 4)\n\n type = HEXADECIMAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 21:5: ( '-' )? '0x' ( 'a' .. 'f' | 'A' .. 'F' | '0' .. '9' )+\n # at line 21:5: ( '-' )?\n alt_7 = 2\n look_7_0 = @input.peek(1)\n\n if (look_7_0 == ?-) \n alt_7 = 1\n end\n case alt_7\n when 1\n # at line 21:5: '-'\n match(?-)\n\n end\n match(\"0x\")\n # at file 21:15: ( 'a' .. 'f' | 'A' .. 'F' | '0' .. 
'9' )+\n match_count_8 = 0\n loop do\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0.between?(?0, ?9) || look_8_0.between?(?A, ?F) || look_8_0.between?(?a, ?f)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line \n if @input.peek(1).between?(?0, ?9) || @input.peek(1).between?(?A, ?F) || @input.peek(1).between?(?a, ?f)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n match_count_8 > 0 and break\n eee = EarlyExit(8)\n\n\n raise eee\n end\n match_count_8 += 1\n end\n\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 4)\n\n end", "def tns_unhexify(d=/\\s*/)\n self.strip.gsub(/([A-Fa-f0-9]{1,2})#{d}?/) { $1.hex.chr }\n end", "def hex_to_u32(s)\n s.hex\nend", "def hex_for_non_alphanumeric_code(input)\n if input < LOW_HEX_CODE_LIMIT\n HEX_CODES[input]\n else\n input.to_s(HEX_BASE)\n end\n end", "def hex(value)\n \"0x\" + value.to_s(16)\n end", "def hex_digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 71 )\n\n\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x46 ) || @input.peek( 1 ).between?( 0x61, 0x66 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 71 )\n\n\n end", "def test_ber_integers\n assert_equal( \"\\002\\001\\005\", 5.to_ber )\n assert_equal( \"\\002\\002\\001\\364\", 500.to_ber )\n assert_equal( \"\\x02\\x02\\xC3P\", 50000.to_ber )\n assert_equal( \"\\002\\005\\001*\\005\\362\\000\", 5000000000.to_ber )\n end", "def bytea(value)\n \"'\\\\x#{value.unpack1('H*')}'::bytea\"\n end", "def asciihex(data)\n data.chop! if data[-1,1] == \">\"\n data = data[1,data.size] if data[0,1] == \"<\"\n data.gsub!(/[^A-Fa-f0-9]/,\"\")\n data << \"0\" if data.size % 2 == 1\n data.scan(/.{2}/).map { |s| s.hex.chr }.join(\"\")\n rescue Exception => e\n # Oops, there was a problem decoding the stream\n raise MalformedPDFError, \"Error occured while decoding an ASCIIHex stream (#{e.class.to_s}: #{e.to_s})\"\n end", "def to_ints(hex)\n hex.scan(/\\w\\w/).map(&:hex)\nend", "def to_hexa\n [self.token].pack('H*')\n end", "def test_char_literal_simple\n check(C::CharLiteral, <<-EOS)\n |'x'\n EOS\n end", "def bin_to_hex(s)\n return s.unpack('H*').first\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. 
'7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def hex_color(arg)\n case arg\n when Integer\n Java::Monkstone::ColorUtil.colorLong(arg)\n when String\n raise StandardError, 'Dodgy Hexstring' unless /#\\h{6}$/.match?(arg)\n\n Java::Monkstone::ColorUtil.colorString(arg)\n when Float\n Java::Monkstone::ColorUtil.colorDouble(arg)\n else\n raise StandardError, 'Dodgy Color Conversion'\n end\n end", "def colored_hex(val)\n colorize(hex(val), sev: :integer)\n end", "def is_hexword(s)\n s.match(/^[0-9a-fA-F]{8}$/) != nil\nend", "def hex(value)\n value[1..-1].hex + 0xff000000\n end", "def to_trace_id(hex_id)\n Array(hex_id[2..9] + hex_id[11..hex_id.length]).pack('H*')\n end", "def hex_format(value, digits: nil, upper: nil)\n hex = upper.is_a?(FalseClass) ? 'x' : 'X'\n fmt = (digits &&= positive(digits)) ? \"%0#{digits}#{hex}\" : \"%#{hex}\"\n fmt % value\n rescue ArgumentError, TypeError\n # noinspection RubyScope\n fmt % 0\n end", "def cvrt_a_ins(a)\n \"%016b\" % a\n end", "def hex(c)\n return nil if c.nil?\n b = c[0].ord\n if b < 0xff\n @@hex_codes[b]\n else\n b.to_h\n end\n end", "def hexlify\n offset = 0\n while (str = $stdin.read(16))\n out = \"#{'%08x' % offset} \"\n out << hex_digits(str)\n out << ' '\n out << ascii(str)\n puts out\n offset += 16\n end\nend", "def hex(string)\n Digest::SHA1.hexdigest(string)\n end", "def hex_trace_id\n trace_id.unpack1('H*')\n end", "def hex_encode(bin_str)\n bin_str.reverse.scan(/.{1,4}/).map{ |chunk|\n chunk.reverse.to_i(2).to_s(16)\n }.reverse.join.scan(/.{8}|.{4}|.{2}/).map{ |hex|\n hex.prepend('0x')\n }\n end", "def as_hex\n raise NotImplementedError\n end", "def hexadecimal?\n # Both standard and shorthand (CSS) style hexadecimal color value.\n not cterm? and /\\A#?(?:[0-9a-f]{3}|[0-9a-f]{6})\\z/io.match(@value.to_s)\n end", "def net_pack_int(int)\n net_error \"#{__method__}: '#{int}' is too low allowed range #{NET_MIN_INT}-#{NET_MAX_INT}\" if int < NET_MIN_INT\n net_error \"#{__method__}: '#{int}' is too high allowed range #{NET_MIN_INT}-#{NET_MAX_INT}\" if int > NET_MAX_INT\n int += NET_INT_OFFSET\n int.chr\nend", "def to_ints(hex)\n r,g,b = hex.scan(/\\w\\w/)\n [r,g,b].map do |s|\n s.hex\n end\nend", "def hex?(code)\n valid?(code, HEX_CHARACTERS)\n end", "def bin2hex(string)\n string.unpack(\"H*\").join\nend", "def hex_str2hex_octets(hex_str)\n hex_str.gsub(/../) {|s| s + \":\"}\nend", "def to_hex_code(num)\n num.to_s(16)[-4, 4]\n end", "def hashFromString(str)\n\t\tstr = str[2..-1] if str.start_with?(\"0x\")\n\t\tstr = str.to_s[0..maxStringLength-1]\n\t\thash = Integer(\"0x\" + str)\n\tend", "def get_codepoint(character)\n \"%04x\" % character.unpack(\"U\")[0]\n end", "def test_check_hex_lower_bound\r\n assert_nil @g.check_hex('/1efg')\r\n end", "def str_to_hex(str)\n\tstr.unpack(\"H*\").first\nend", "def initialize(hex)\n super\n @a = 1\n @hex = hex\n end", "def convert_decimal_octet_to_hexadecimal(octet)\n unless is_valid_decimal_octet? 
octet\n raise Valshamr::InvalidDecimalOctetError, \"Expected decimal value in the range of 0..255, but received: #{octet}\"\n end\n\n hexadecimal = octet.to_s(base=16).upcase\n hexadecimal.insert(0, \"0\") if hexadecimal.length == 1\n\n hexadecimal\n end", "def int2hex(int_string)\n result = []\n int_string.each_byte do |char|\n result << char.to_s(16).rjust(2, \"0\").upcase\n end\n result.join\n end", "def validate(string)\n error = \"Invalid: arg must be hexadecimal string (68 bytes)\"\n raise ArgumentError.new(error) unless string =~ /([a-f0-9]{2}){68}/\n string\n end", "def to_hexstr\n Common.unpackbytes(to_s)\n end", "def read_hex\n v = read.upcase.ord\n if v >= 48 and v < 58\n return v - 48\n elsif v >= 65 and v < 71\n return v - 65 + 10\n else\n abort \"Missing hex digit\"\n end\n end", "def test_check_hex_true\r\n assert_equal true, @g.check_hex('01ef')\r\n end", "def gas_escape(str)\n str.gsub(/./) { |s| \"\\\\x#{s[0].ord.to_s(16)}\"}\n end", "def literal_blob(v)\n blob = '0x'\n v.each_byte{|x| blob << sprintf('%02x', x)}\n blob\n end", "def to_hex\n @value.unpack(\"H*\").join\n end", "def varint!\n case (peek & 0xC0)\n when 0x00\n unpack_one!('C') #& 0x3F\n when 0x40\n unpack_one!('n') & 0x3F_FF\n when 0x80\n unpack_one!('N') & 0x3FFF_FFFF\n when 0xC0\n unpack_one!('Q>') & 0x3FFFFFFF_FFFFFFFF\n end\n end", "def to_i\n # This will be common. Memoize for efficiency.\n @int_val ||= hex.to_i(16)\n end", "def set_hex(clean_hex_str)\n @red = clean_hex_str[0, 2].to_i(16)\n @green = clean_hex_str[2, 2].to_i(16)\n @blue = clean_hex_str[4, 2].to_i(16)\n end", "def set_hex_str(ilv_hex_str)\n $test_logger.log(\"Set hex str\")\n @ilv_hex_str = ilv_hex_str\n notify_change(InputChannel::HEX)\n end", "def yy_unicode_s(char_code)\n \"U+#{\"%04X\" % char_code}\"\n end", "def codepoint(str)\n return str.gsub(/\\\\u[\\da-f]{4}/i) { |m| [m[-4..-1].to_i(16)].pack('U') }\n end", "def xchr\n n = XChar::CP1252[self] || self\n n = 42 unless XChar::VALID.find {|value| value.kind_of?(Range) ? 
value.include?(n) : (value == n)}\n\n XChar::PREDEFINED[n] or case n\n when 0...128\n n.chr\n when 0x400..0x4FF\n [n].pack 'U'\n else\n \"&##{n};\"\n end\n end", "def make_hex_like(regval, size_in_nibbles)\n outstr = ''\n regex = '^(.?.?.?.)'\n (size_in_nibbles - 1).times { regex += '(....)' }\n regex += '$'\n Regexp.new(regex) =~ regval\n\n nibbles = []\n size_in_nibbles.times do |n| # now grouped by nibble\n nibbles << Regexp.last_match[n + 1]\n end\n\n nibbles.each_with_index do |nibble, i|\n # If contains any special chars...\n if nibble =~ /[#{UNKNOWN_CHAR}#{DONT_CARE_CHAR}#{STORE_CHAR}#{OVERLAY_CHAR}]/\n # If all the same...\n if nibble[0] == nibble[1] && nibble[1] == nibble[2] && nibble[2] == nibble[3]\n outstr += nibble[0, 1] # .to_s\n # Otherwise present this nibble in 'binary' format\n else\n outstr += \"[#{nibble.downcase}]\"\n end\n # Otherwise if all 1s and 0s...\n else\n outstr += '%1X' % nibble.to_i(2)\n end\n end\n outstr\n end", "def hex!\n @hex = true\n end", "def int32()\n _int32(\"int32\")\n end", "def f_to_hex f\n return 'nan' if f.nan?\n if f.infinite?\n if f > 0\n return 'inf'\n else\n return '-inf'\n end\n end\n\n sign = ''\n integer = 0\n fraction = []\n exponent = 0\n\n if f < 0\n sign = '-'\n f = -f\n end\n\n while (f <= 1)\n f *= 2\n exponent -= 1\n end\n \n while (f >= 10)\n f /= 2\n exponent += 1\n end\n\n integer = f.to_i\n f -= integer\n \n while (f and fraction.size < 16)\n divisor = (16 ** (fraction.size+1)).to_f\n frac = 1\n while (f - frac/divisor > 0 and frac < 16)\n frac += 1\n end\n frac -= 1\n fraction << frac\n f -= frac/divisor\n end\n str = sign+\"0x#{integer}.\"\n fraction.each do |frac|\n str << frac.to_s(16)\n end\n str << \"p#{exponent}\"\n str\n end", "def hex_bitmap hex\n bin = hex.split(' ').map do |n|\n n.hex.to_s(2).rjust(8, '0').gsub(/[01]/, '0' => ' ', '1' => 'x')\n end\n\n bin.each { |n| puts n }\nend", "def digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 53 )\n\n type = DIGIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 352:8: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 53 )\n\n end" ]
[ "0.69237506", "0.66149974", "0.66149974", "0.66079575", "0.6539955", "0.6500011", "0.63553786", "0.62480104", "0.6160922", "0.6156475", "0.6146044", "0.6136119", "0.6101334", "0.606544", "0.6061789", "0.60541445", "0.6048677", "0.6035734", "0.601013", "0.6002893", "0.59985405", "0.596874", "0.59580785", "0.5918924", "0.59142864", "0.5880174", "0.5839562", "0.58361894", "0.58349717", "0.58288336", "0.5795025", "0.57729506", "0.5756094", "0.5753198", "0.5750267", "0.57327133", "0.5700653", "0.57002985", "0.5697998", "0.5683599", "0.5678764", "0.5676412", "0.5675743", "0.56454945", "0.5631919", "0.5605751", "0.5603376", "0.55942994", "0.5586444", "0.55863583", "0.5571269", "0.5556937", "0.5548815", "0.55415845", "0.5536946", "0.5519544", "0.55151457", "0.55115193", "0.5469893", "0.5458085", "0.54580504", "0.5457539", "0.5456173", "0.54527223", "0.5452008", "0.5441792", "0.5435235", "0.5432553", "0.5431607", "0.54310983", "0.54141366", "0.53846085", "0.53622943", "0.53519243", "0.53517985", "0.53508276", "0.5346828", "0.5328169", "0.5315269", "0.53104717", "0.530724", "0.53062886", "0.5303431", "0.52980936", "0.52871275", "0.5279658", "0.5270705", "0.52681786", "0.52245116", "0.52134115", "0.52009374", "0.51821643", "0.5171688", "0.51711565", "0.5165219", "0.5160436", "0.5153763", "0.5148367", "0.514686", "0.5133581" ]
0.7719524
0
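The retrieval negatives closing the record above are all hexadecimal helpers: hex-digit tests, 0x-prefixed parsing, and hex/decimal/binary conversion. For orientation, the recognition they all orbit fits in a few lines of Ruby. This is a self-contained sketch written for this note; the function name scan_hex_integer_literal is invented here, and since the record's own query and document lie before this excerpt, the sketch is not drawn from them:

    # Sketch: recognize an ECMAScript-style HexIntegerLiteral ("0x" or "0X"
    # followed by one or more hex digits) at the start of src and return its
    # integer value, or nil when src does not begin with one.
    def scan_hex_integer_literal(src)
      m = /\A0[xX](\h+)/.match(src)
      m && m[1].to_i(16)
    end

    scan_hex_integer_literal("0x1F")  #=> 31
    scan_hex_integer_literal("123")   #=> nil

Ruby's \h character class matches exactly [0-9a-fA-F], so the anchored regex plus to_i(16) covers what several of the longer snippets above reimplement by hand.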
B.1.1 OctalIntegerLiteral :: 0 OctalDigit | OctalIntegerLiteral OctalDigit
def octal_integer_literal code = @codes[@pos] if code.nil? return nil elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37 @pos += 1 pos0 = @pos while code = @codes[@pos] and code >= 0x30 and code <= 0x37 @pos += 1 end if identifier_start?(code) raise ParseError.new("The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit", self) else return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack("U*").to_i(8)) end else nil end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def octal(digits)\n string(OCTAL_DIGITS, digits)\n end", "def numeric_literal\n hex_integer_literal || octal_integer_literal || decimal_literal\n end", "def oct\n str = self.__zreplace_first_double_underscore.strip # for 1.8.7\n arr = str.__extract_base(8)\n base = arr.__at(0)\n sign_str = arr.__at(1)\n body = arr.__at(2)\n first_ch = body.__at(0)\n if first_ch.eql?( ?+ ) || first_ch.eql?( ?- )\n return 0 # redundant sign character is not an octal digit\n end\n num = Integer.__from_string_radix(body, base)\n if sign_str[0].eql?( ?- )\n num = num * -1\n end\n num\n end", "def xtest_int_literal_big\n check(C::IntLiteral, <<-EOS)\n |10000000000\n EOS\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. '7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def hex_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n #0x / 0X\n elsif code == 0x30 and (@codes[@pos+1] == 0x78 || @codes[@pos+1] == 0x58)\n @pos += 2\n pos0 = @pos\n while code = @codes[@pos] and hex_digit?(code)\n @pos += 1;\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(16))\n end\n else\n nil\n end\n end", "def Integer(p0) end", "def oct() end", "def numerify(number_string, leading_zero: T.unsafe(nil)); end", "def leading_zero_number(digits: T.unsafe(nil)); end", "def octal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 6)\n\n type = OCTAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 29:5: ( '-' )? '0' ( '0' .. '7' )*\n # at line 29:5: ( '-' )?\n alt_11 = 2\n look_11_0 = @input.peek(1)\n\n if (look_11_0 == ?-) \n alt_11 = 1\n end\n case alt_11\n when 1\n # at line 29:5: '-'\n match(?-)\n\n end\n match(?0)\n # at line 29:14: ( '0' .. '7' )*\n loop do #loop 12\n alt_12 = 2\n look_12_0 = @input.peek(1)\n\n if (look_12_0.between?(?0, ?7)) \n alt_12 = 1\n\n end\n case alt_12\n when 1\n # at line 29:15: '0' .. 
'7'\n match_range(?0, ?7)\n\n else\n break #loop 12\n end\n end\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 6)\n\n end", "def read_octal_char(c)\n r = c.ord - '0'.ord\n if !nextoct?\n return r\n end\n\n r = (r << 3) | (readc.ord - '0'.ord)\n if !nextoct?\n return r\n end\n\n (r << 3) | (readc.ord - '0'.ord)\n end", "def octal?(c)\n return false if c.nil?\n c =~ /[0-7]/\n end", "def literal_integer(v)\n if v > 9223372036854775807 || v < -9223372036854775808\n literal_integer_outside_bigint_range(v)\n else\n v.to_s\n end\n end", "def binary(digits: T.unsafe(nil)); end", "def non_zero_digit; end", "def symbolic_to_octal(permission_string)\n codex = {'r' => 4, 'w' => 2, 'x' => 1, '-' => 0 }\n one = codex[permission_string[0]] + codex[permission_string[1]] + codex[permission_string[2]]\n two = codex[permission_string[3]] + codex[permission_string[4]] + codex[permission_string[5]]\n three = codex[permission_string[6]] + codex[permission_string[7]] + codex[permission_string[8]] \n \"#{one}#{two}#{three}\".to_i\nend", "def intGetOctagonalInt i\n i * ( 3 * i - 2 )\nend", "def next_octal\n c = self.next\n return nil if c.nil?\n return c if octal?(c)\n nil\n end", "def oct(a)\n\tb = []\n\ta.each do |x|\n\t\tif x % 8 == 0\n\t b << x\n\t\tend\n\tend\nreturn b.inspect\t\nend", "def handle_int(int, lineno_column)\n Literal.new int.to_i\n end", "def octal_val(decimal)\n octal = 0\n i = 0\n num = decimal\n\n loop do\n break if num <= 0\n octal += (num.divmod(8)[1] * (10 ** i))\n num = num.divmod(8)[0]\n i += 1\n end\n\n octal\nend", "def leading_zero_number(digits: 10)\n \"0#{(2..digits).collect { digit }.join}\"\n end", "def integer_parsers\n @integer = action(seq(1..9, rep0(0..9))) do |ast|\n ast.flatten.join('').to_i\n end\n end", "def test_ber_integers\n assert_equal( \"\\002\\001\\005\", 5.to_ber )\n assert_equal( \"\\002\\002\\001\\364\", 500.to_ber )\n assert_equal( \"\\x02\\x02\\xC3P\", 50000.to_ber )\n assert_equal( \"\\002\\005\\001*\\005\\362\\000\", 5000000000.to_ber )\n end", "def net_pack_int(int)\n net_error \"#{__method__}: '#{int}' is too low allowed range #{NET_MIN_INT}-#{NET_MAX_INT}\" if int < NET_MIN_INT\n net_error \"#{__method__}: '#{int}' is too high allowed range #{NET_MIN_INT}-#{NET_MAX_INT}\" if int > NET_MAX_INT\n int += NET_INT_OFFSET\n int.chr\nend", "def DISABLED_test_non_negative_integers\n (0..1000).each do |digit|\n assert_tokenises_as digit.to_s, IntegerToken.new(digit)\n end\n assert_tokenises_as '0 ', IntegerToken.new(0)\n assert_tokenises_as ' 0', IntegerToken.new(0)\n end", "def literal_handle(_buffer, string)\n case string\n when /^\\d{,3}$/\n return if string.size < 3\n [string.to_i].pack('U')\n when /^o([0-7]{,3})$/i\n return if Regexp.last_match(1).size < 3\n [Integer(\"0#{Regexp.last_match(1)}\")].pack('U')\n when /^x(\\h{,2})$/i\n return if Regexp.last_match(1).size < 2\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n when /^u(\\h{,4})$/\n return if Regexp.last_match(1).size < 4\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n when /^U(\\h{,8})$/\n return if Regexp.last_match(1).size < 8\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n end\n end", "def number(digits: T.unsafe(nil)); end", "def initialize(digits)\n @value = digits.to_s.to_i\n end", "def test_002_from_i()\n TestVals.each do |sVal|\n iVal = sVal.to_i(2)\n bs = BitString.new(0)\n bs.from_i(iVal)\n assert_equal(iVal,\n bs.to_i,\n \"Test BitString.from_i(#{sVal}) => 
#{iVal}\")\n end\n end", "def add_zero_to_1_to_9(str)\n str.sub(/(?<=\\D)\\d\\D?$/, '0\\0' )\n end", "def to_decimal\n octal_string =~ INVALID_OCTAL ? 0 : calculate\n end", "def remove_octals(string)\n command = string\n string.gsub(/^0\\d|\\D0\\d/) do |oct|\n dec = oct.sub(\"0\", \"\")\n command = command.sub(oct, dec)\n end\n command\nend", "def rb_integer(from: T.unsafe(nil), to: T.unsafe(nil)); end", "def prepad_zeros(length)\n str = self\n str.insert(0, '0') while str.length < length\n str\n end", "def test_IntegerLiterals_sample02\n assert_equal(\"Fixnum\", 1_000_000_000.class.to_s)\n end", "def digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 53 )\n\n type = DIGIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 352:8: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 53 )\n\n end", "def typecast_value_integer(value)\n Integer(value.is_a?(String) ? value.sub(LEADING_ZERO_RE, LEADING_ZERO_REP) : value)\n end", "def hexadecimal(digits: T.unsafe(nil)); end", "def fake_bin(s)\n s.tr('1-9', '00001')\nend", "def digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 37 )\n\n type = DIGIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 136:8: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 37 )\n\n end", "def decimal_literal\n pos0 = @pos\n code = @codes[@pos]\n\n if code.nil?\n return nil\n elsif code == 0x2e #.\n @pos += 1\n f = decimal_digits\n if f.nil? 
#=> this period is punctuator\n @pos = pos0 + 1\n return ECMA262::PUNC_PERIOD\n end\n if (code = @codes[@pos]) == 0x65 || code == 0x45\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new('0', f, e)\n elsif code == 0x30 # zero\n i = \"0\"\n @pos += 1\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n elsif code >= 0x31 and code <= 0x39\n i = decimal_digits\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n end\n\n nil\n end", "def binary(digits)\n string(BINARY_DIGITS, digits)\n end", "def type_literal_generic_fixnum(column)\n type_literal_generic_integer(column)\n end", "def set_literal\n <<-CODE\n next_int;\n tuple_put(state, cpu_current_literals(state, c), _int, stack_top());\n CODE\n end", "def DISABLED_test_negative_integers\n (-1000..-1).each do |digit|\n assert_tokenises_as digit.to_s, IntegerToken.new(digit)\n end\n assert_tokenises_as '-1 ', IntegerToken.new(-1)\n assert_tokenises_as ' -1', IntegerToken.new(-1)\n end", "def digit; end", "def BinaryConverter(str)\r\n return str.to_i(2)\r\nend", "def to_int() end", "def to_int() end", "def digit \n\t\n\t$cst.add_branch(\"digit\")\n\t\n\tmatch_token(\"T_DIGIT\", $tokens[$index])\n\t\n\t$cst.ascend\n\t\nend", "def replace_binary(str)\n str.gsub(/[01]+/) { |bin| bin.to_i(2).to_s(10) }\n end", "def replace_binary(str)\n str.gsub(/[01]+/) { |bin| bin.to_i(2).to_s(10) }\n end", "def convert_int(int)\n [int].pack('s>')\n end", "def my_atoi(s)\n new_string = \"\"\n nums = \"+-01234567890.\"\n \n s.each_char do |char| \n if !nums.include?(char) && char != \" \" && new_string == \"\"\n return 0\n elsif !nums.include?(char) && char !=\" \"\n break\n else nums.include?(char)\n new_string << char \n end\n end\n \n if new_string.to_i < -2**31\n return -2**31\n elsif new_string.to_i > 2**31 - 1\n return 2**31 - 1\n else\n return new_string.to_i\n end\nend", "def binary(integer)\n\nend", "def binary(integer)\n\nend", "def octal_escape!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 75 )\n\n\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 594:7: ( '\\\\\\\\' ( '0' .. '3' ) ( '0' .. '7' ) ( '0' .. '7' ) | '\\\\\\\\' ( '0' .. '7' ) ( '0' .. '7' ) | '\\\\\\\\' ( '0' .. 
'7' ) )\n alt_24 = 3\n look_24_0 = @input.peek( 1 )\n\n if ( look_24_0 == 0x5c )\n look_24_1 = @input.peek( 2 )\n\n if ( look_24_1.between?( 0x30, 0x33 ) )\n look_24_2 = @input.peek( 3 )\n\n if ( look_24_2.between?( 0x30, 0x37 ) )\n look_24_4 = @input.peek( 4 )\n\n if ( look_24_4.between?( 0x30, 0x37 ) )\n alt_24 = 1\n else\n alt_24 = 2\n\n end\n else\n alt_24 = 3\n\n end\n elsif ( look_24_1.between?( 0x34, 0x37 ) )\n look_24_3 = @input.peek( 3 )\n\n if ( look_24_3.between?( 0x30, 0x37 ) )\n alt_24 = 2\n else\n alt_24 = 3\n\n end\n else\n raise NoViableAlternative( \"\", 24, 1 )\n\n end\n else\n raise NoViableAlternative( \"\", 24, 0 )\n\n end\n case alt_24\n when 1\n # at line 594:11: '\\\\\\\\' ( '0' .. '3' ) ( '0' .. '7' ) ( '0' .. '7' )\n match( 0x5c )\n if @input.peek( 1 ).between?( 0x30, 0x33 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n when 2\n # at line 595:11: '\\\\\\\\' ( '0' .. '7' ) ( '0' .. '7' )\n match( 0x5c )\n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n when 3\n # at line 596:11: '\\\\\\\\' ( '0' .. '7' )\n match( 0x5c )\n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 75 )\n\n\n end", "def int_n! n\n value = 0\n [\n [8, 'Q>'],\n [4, 'N'],\n [2, 'n'],\n [1, 'C'],\n ].each do |nbytes, fmt|\n shift = nbytes * 8\n while n > nbytes\n value = (value << shift) | unpack_one!(fmt)\n n -= nbytes\n end\n end\n value\n end", "def digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 47 )\n\n type = DIGIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 184:12: ( '0' .. '9' )+\n # at file 184:12: ( '0' .. '9' )+\n match_count_3 = 0\n while true\n alt_3 = 2\n look_3_0 = @input.peek( 1 )\n\n if ( look_3_0.between?( 0x30, 0x39 ) )\n alt_3 = 1\n\n end\n case alt_3\n when 1\n # at line 184:13: '0' .. 
'9'\n match_range( 0x30, 0x39 )\n\n else\n match_count_3 > 0 and break\n eee = EarlyExit(3)\n\n\n raise eee\n end\n match_count_3 += 1\n end\n\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 47 )\n\n end", "def test_001_to_i()\n TestVals.each do |sVal|\n iVal = sVal.to_i(2)\n bs = BitString.new(sVal)\n assert_equal(iVal,\n bs.to_i,\n \"Test BitString.to_i(#{sVal}) => #{iVal}\")\n end\n end", "def parse_integer(int_string)\n Integer(int_string)\n end", "def to_int(*) end", "def encode_mpint(value)\n value.to_s(0)\n end", "def to_i(base=10) end", "def myAtoi( str)\n value = 0\n size = str.length\n i = 0\n while (i < size)\n value = (value << 3) + (value << 1) + (str[i].ord - '0'.ord)\n i += 1\n end\n return value\nend", "def stringy2(int, *zero)\n str = ''\n if zero == [0] then str += '0' else str += '1' end\n int.times do |n|\n if n > 0\n array = str.chars\n if array[n - 1] == '0' then str += '1' else str += '0' end\n end\n end\n p str\nend", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. '9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def type_literal_generic_integer(column)\n :integer\n end", "def integer_print_10\nend", "def number_or_zero(s)\n i= s.to_i\n i.to_s == s ? i : 0\n end", "def zero_insert(n)\n result = ''\n index, n = 0, n.to_s\n while index < n.length - 1\n a, b = n[index].to_i, n[index + 1].to_i\n result += a.to_s\n result += '0' if a == b || (a + b) % 10 == 0\n index += 1\n end\n result += n[index]\n result\nend", "def to_digit\n return NUMBER[self] if self <= 9 && self >= 0\n NUMBER[0]\n end", "def two_digits_number(x)\n x < 10 ? \"0#{x}\" : x.to_s\n end", "def atoi(string_int)\n sum = 0\n string_int.each_byte do |char|\n sum = (sum * 10) + (char - ZERO_ASCII_CODE)\n end\n\n return sum\nend", "def octal_esc!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 8 )\n\n \n # - - - - main rule block - - - -\n # at line 302:5: ( '\\\\\\\\' ( '0' .. '3' ) ( '0' .. '7' ) ( '0' .. '7' ) | '\\\\\\\\' ( '0' .. '7' ) ( '0' .. '7' ) | '\\\\\\\\' ( '0' .. 
'7' ) )\n alt_3 = 3\n look_3_0 = @input.peek( 1 )\n\n if ( look_3_0 == 0x5c )\n look_3_1 = @input.peek( 2 )\n\n if ( look_3_1.between?( 0x30, 0x33 ) )\n look_3_2 = @input.peek( 3 )\n\n if ( look_3_2.between?( 0x30, 0x37 ) )\n look_3_4 = @input.peek( 4 )\n\n if ( look_3_4.between?( 0x30, 0x37 ) )\n alt_3 = 1\n else\n alt_3 = 2\n end\n else\n alt_3 = 3\n end\n elsif ( look_3_1.between?( 0x34, 0x37 ) )\n look_3_3 = @input.peek( 3 )\n\n if ( look_3_3.between?( 0x30, 0x37 ) )\n alt_3 = 2\n else\n alt_3 = 3\n end\n else\n raise NoViableAlternative( \"\", 3, 1 )\n end\n else\n raise NoViableAlternative( \"\", 3, 0 )\n end\n case alt_3\n when 1\n # at line 302:9: '\\\\\\\\' ( '0' .. '3' ) ( '0' .. '7' ) ( '0' .. '7' )\n match( 0x5c )\n # at line 302:14: ( '0' .. '3' )\n # at line 302:15: '0' .. '3'\n match_range( 0x30, 0x33 )\n\n # at line 302:25: ( '0' .. '7' )\n # at line 302:26: '0' .. '7'\n match_range( 0x30, 0x37 )\n\n # at line 302:36: ( '0' .. '7' )\n # at line 302:37: '0' .. '7'\n match_range( 0x30, 0x37 )\n\n\n when 2\n # at line 303:9: '\\\\\\\\' ( '0' .. '7' ) ( '0' .. '7' )\n match( 0x5c )\n # at line 303:14: ( '0' .. '7' )\n # at line 303:15: '0' .. '7'\n match_range( 0x30, 0x37 )\n\n # at line 303:25: ( '0' .. '7' )\n # at line 303:26: '0' .. '7'\n match_range( 0x30, 0x37 )\n\n\n when 3\n # at line 304:9: '\\\\\\\\' ( '0' .. '7' )\n match( 0x5c )\n # at line 304:14: ( '0' .. '7' )\n # at line 304:15: '0' .. '7'\n match_range( 0x30, 0x37 )\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 8 )\n\n end", "def format_int(n)\n return - 2**31 if n < -2**31\n return 2**31 - 1 if n > 2**31 - 1\n n\nend", "def test_encode_integer\n\n # Fixnum\n #\n #assert_equal( \"\\x02\\x02\\x96\\x46\", -27_066.to_ber )\n #assert_equal( \"\\x02\\x02\\xFF\\x7F\", -129.to_ber )\n #assert_equal( \"\\x02\\x01\\x80\", -128.to_ber )\n #assert_equal( \"\\x02\\x01\\xFF\", -1.to_ber )\n\n assert_equal( \"\\x02\\x01\\x00\", 0.to_ber )\n assert_equal( \"\\x02\\x01\\x01\", 1.to_ber )\n assert_equal( \"\\x02\\x01\\x7F\", 127.to_ber )\n assert_equal( \"\\x02\\x01\\x80\", 128.to_ber )\n assert_equal( \"\\x02\\x01\\xFF\", 255.to_ber )\n\n assert_equal( \"\\x02\\x02\\x01\\x00\", 256.to_ber )\n assert_equal( \"\\x02\\x02\\xFF\\xFF\", 65535.to_ber )\n\n assert_equal( \"\\x02\\x03\\x01\\x00\\x00\", 65536.to_ber )\n assert_equal( \"\\x02\\x03\\xFF\\xFF\\xFF\", 16_777_215.to_ber )\n\n assert_equal( \"\\x02\\x04\\x01\\x00\\x00\\x00\", 0x01000000.to_ber )\n assert_equal( \"\\x02\\x04\\x3F\\xFF\\xFF\\xFF\", 0x3FFFFFFF.to_ber )\n\n # Bignum\n #\n assert_equal( \"\\x02\\x04\\x4F\\xFF\\xFF\\xFF\", 0x4FFFFFFF.to_ber )\n #assert_equal( \"\\x02\\x05\\x00\\xFF\\xFF\\xFF\\xFF\", 0xFFFFFFFF.to_ber )\n end", "def cast_to_int(s, max_num_size = DEFAULT_MAX_NUM_SIZE)\n data = s.htb\n raise '\"script number overflow\"' if data.bytesize > max_num_size\n if require_minimal && data.bytesize > 0\n if data.bytes[-1] & 0x7f == 0 && (data.bytesize <= 1 || data.bytes[data.bytesize - 2] & 0x80 == 0)\n raise 'non-minimally encoded script number'\n end\n end\n Script.decode_number(s)\n end", "def leading_zeroes(max_num_length)\n 101.times do |i|\n num_of_zeroes = max_num_length - i.digits.length\n puts \"0\" * num_of_zeroes + i.to_s\n end\nend", "def test_char_literal_simple\n check(C::CharLiteral, <<-EOS)\n |'x'\n EOS\n end", "def promotion_code(digits: T.unsafe(nil)); end", "def set_oct\n @oct = Oct.find(params[:id])\n end", "def remove_leading_zero(arg)\n arg.to_s.sub(\"0\", \"\")\n end", "def 
integer!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 26 )\n\n\n\n type = INTEGER\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 191:10: ( '0' .. '9' )+\n # at file 191:10: ( '0' .. '9' )+\n match_count_1 = 0\n while true\n alt_1 = 2\n look_1_0 = @input.peek( 1 )\n\n if ( look_1_0.between?( 0x30, 0x39 ) )\n alt_1 = 1\n\n end\n case alt_1\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_1 > 0 and break\n eee = EarlyExit(1)\n\n\n raise eee\n end\n match_count_1 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 26 )\n\n\n end", "def cvrt_a_ins(a)\n \"%016b\" % a\n end", "def integer(i, n)\n limit = 2**n - 1\n return [i].pack('C') if i < limit\n\n bytes = []\n bytes.push limit unless n.zero?\n\n i -= limit\n while (i >= 128)\n bytes.push((i % 128) + 128)\n i /= 128\n end\n\n bytes.push i\n bytes.pack('C*')\n end", "def zeropad(arg)\n # make sure it is a string\n arg = arg.to_s\n arg = '0'+arg if arg.length < 2\n return arg\nend", "def DISABLED_test_integer_right_paren_sequence\n assert_tokenises_as '2)2', IntegerToken.new(2), RightParenthesisToken.instance, IntegerToken.new(2)\n assert_tokenises_as '-2)-2', IntegerToken.new(-2), RightParenthesisToken.instance, IntegerToken.new(-2)\n end", "def reverse_ruby(x)\n x_rev_signed = x.positive? ? '' : '-'\n x_rev = (x_rev_signed + x.to_s.split('-').last.reverse).to_i\n x_rev.bit_length > 31 ? 0 : x_rev\nend", "def int32()\n _int32(\"int32\")\n end", "def atoi(s)\n\t\tnum = 0\n\t\tacc = 0\n\t\ts.length.downto(0) { |i|\n\t\t\tif s[i] == '0'\n\t\t\t\tnum = 0\n\t\t\telsif s[i] == '1'\n\t\t\t\tnum = 1\n\t\t\telsif s[i] == '2'\n\t\t\t\tnum = 2\n\t\t\telsif s[i] == '3'\n\t\t\t\tnum = 3\n\t\t\telsif s[i] == '4'\n\t\t\t\tnum = 4\n\t\t\telsif s[i] == '5'\n\t\t\t\tnum = 5\n\t\t\telsif s[i] == '6'\n\t\t\t\tnum = 6\n\t\t\telsif s[i] == '7'\n\t\t\t\tnum = 7\n\t\t\telsif s[i] == '8'\n\t\t\t\tnum = 8\n\t\t\telsif s[i] == '9'\n\t\t\t\tnum = 9\n\t\t\telsif s[i] == '-' && i == 0\n\t\t\t\tnum = 0\n\t\t\t\t\tacc *= -1\n\t\t\t\telsif s[i] == nil || (s[i] == '+' && i == 0)\n\t\t\t\tnum = 0\n\t\t\telse\n\t\t\t\tabort(\"No se ingreso un número válido.\")\n\t\t\tend\n\t\t\tif num > 0\n\t\t\t\tacc += num * 10** (s.length-1-i)\n\t\t\tend\n\t\t}\n\t\tacc\n\tend", "def test_java_char10\n token, value, rest = @java.lex(\"'\\\\09'\")\n assert_equal(:error, token)\n end", "def DISABLED_test_integer_left_paren_sequence\n assert_tokenises_as '2(2', IntegerToken.new(2), LeftParenthesisToken.instance, IntegerToken.new(2)\n assert_tokenises_as '-2(-2', IntegerToken.new(-2), LeftParenthesisToken.instance, IntegerToken.new(-2)\n end", "def type_literal_generic_integer(column)\n column[:serial] ? :serial : super\n end", "def type_literal_generic_integer(column)\n column[:serial] ? 
:serial : super\n end", "def hex_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 68 )\n\n\n\n type = HexLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 523:14: '0' ( 'x' | 'X' ) ( HexDigit )+\n match( 0x30 )\n if @input.peek(1) == 0x58 || @input.peek(1) == 0x78\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n # at file 523:28: ( HexDigit )+\n match_count_19 = 0\n while true\n alt_19 = 2\n look_19_0 = @input.peek( 1 )\n\n if ( look_19_0.between?( 0x30, 0x39 ) || look_19_0.between?( 0x41, 0x46 ) || look_19_0.between?( 0x61, 0x66 ) )\n alt_19 = 1\n\n end\n case alt_19\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 ) || @input.peek( 1 ).between?( 0x41, 0x46 ) || @input.peek( 1 ).between?( 0x61, 0x66 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_19 > 0 and break\n eee = EarlyExit(19)\n\n\n raise eee\n end\n match_count_19 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 68 )\n\n\n end", "def start_number?(text = T.unsafe(nil)); end", "def integer sexp\n [:pushl, car(sexp).to_s.to_i]\n end" ]
[ "0.71400386", "0.70107275", "0.6917653", "0.6913211", "0.6716382", "0.6342048", "0.6255834", "0.6240928", "0.6075841", "0.6063916", "0.60505784", "0.6028", "0.5994561", "0.5980611", "0.59784025", "0.59563553", "0.5898845", "0.5700946", "0.56559426", "0.565179", "0.564919", "0.56429124", "0.5626226", "0.5603092", "0.5569071", "0.55454487", "0.5543808", "0.55029815", "0.5499112", "0.5451617", "0.5441691", "0.5429432", "0.5420662", "0.5417929", "0.5414758", "0.5403586", "0.53911686", "0.53769857", "0.5353379", "0.5352359", "0.5349046", "0.5346192", "0.53440017", "0.5341119", "0.5285882", "0.5283216", "0.5278708", "0.5258701", "0.5231784", "0.5225696", "0.5225696", "0.522195", "0.52134717", "0.52134717", "0.5192844", "0.5191473", "0.51879686", "0.51879686", "0.51832867", "0.5181456", "0.5180426", "0.51780385", "0.5171529", "0.5158055", "0.5151148", "0.51437336", "0.514341", "0.5126264", "0.5124067", "0.5120132", "0.51194346", "0.51035297", "0.50997585", "0.5098091", "0.50886863", "0.5082154", "0.50809216", "0.5074106", "0.5072368", "0.506369", "0.5063106", "0.50599074", "0.5050001", "0.5048025", "0.5033079", "0.5031218", "0.5027952", "0.5025822", "0.5025019", "0.5011586", "0.50069547", "0.50059575", "0.5003372", "0.50019217", "0.49972352", "0.4995854", "0.4995854", "0.49936962", "0.4990256", "0.4989295" ]
0.7675887
0
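The record above pairs the Annex B.1.1 OctalIntegerLiteral production with the minjs lexer method that implements it: consume a leading 0, then one or more digits in 0..7, and fail loudly when an identifier character follows. The same rule restated as a self-contained sketch (the name scan_octal_integer_literal is invented here; where the real method raises a ParseError on a trailing IdentifierStart, the sketch merely declines to match, and its lookahead approximates IdentifierStart with ASCII characters):

    # B.1.1  OctalIntegerLiteral :: 0 OctalDigit | OctalIntegerLiteral OctalDigit
    # Returns the literal's value, or nil when src does not start with one.
    def scan_octal_integer_literal(src)
      m = /\A0([0-7]+)(?![0-9A-Za-z_$])/.match(src)
      m && m[1].to_i(8)
    end

    scan_octal_integer_literal("017")  #=> 15
    scan_octal_integer_literal("018")  #=> nil  ('8' is not an OctalDigit)
    scan_octal_integer_literal("0")    #=> nil  (a bare 0 is a DecimalLiteral)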
7.8.3 DecimalLiteral :: DecimalIntegerLiteral . DecimalDigits_opt ExponentPart_opt | . DecimalDigits ExponentPart_opt | DecimalIntegerLiteral ExponentPart_opt
def decimal_literal pos0 = @pos code = @codes[@pos] if code.nil? return nil elsif code == 0x2e #. @pos += 1 f = decimal_digits if f.nil? #=> this period is punctuator @pos = pos0 + 1 return ECMA262::PUNC_PERIOD end if (code = @codes[@pos]) == 0x65 || code == 0x45 @pos += 1 e = exponent_part end if identifier_start?(@codes[@pos]) raise ParseError.new("The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit", self) end return ECMA262::ECMA262Numeric.new('0', f, e) elsif code == 0x30 # zero i = "0" @pos += 1 if @codes[@pos] == 0x2e #. @pos += 1 f = decimal_digits if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E @pos += 1 e = exponent_part end elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E @pos += 1 e = exponent_part end if identifier_start?(@codes[@pos]) raise ParseError.new("The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit", self) end return ECMA262::ECMA262Numeric.new(i, f, e) elsif code >= 0x31 and code <= 0x39 i = decimal_digits if @codes[@pos] == 0x2e #. @pos += 1 f = decimal_digits if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E @pos += 1 e = exponent_part end elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E @pos += 1 e = exponent_part end if identifier_start?(@codes[@pos]) raise ParseError.new("The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit", self) end return ECMA262::ECMA262Numeric.new(i, f, e) end nil end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decimal_part(digits: T.unsafe(nil)); end", "def numeric_literal\n hex_integer_literal || octal_integer_literal || decimal_literal\n end", "def DISABLED_test_decimal_exponent_sequence\n assert_tokenises_as '2.^2.0', DecimalToken.new(2), ExponentOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0^-2.', DecimalToken.new(2), ExponentOpToken.instance, DecimalToken.new(-2)\n end", "def DISABLED_test_decimal_plus_sequence\n assert_tokenises_as '2.+2.0', DecimalToken.new(2), AddOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0+-2.', DecimalToken.new(2), AddOpToken.instance, DecimalToken.new(-2)\n end", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. '9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def decimal(name, options={})\n param(:decimal, name, options)\n end", "def DISABLED_test_decimals\n assert_tokenises_as '0.0', DecimalToken.new(0)\n assert_tokenises_as '1.', DecimalToken.new(1)\n assert_tokenises_as ' 3.14159', DecimalToken.new(3.14159)\n assert_tokenises_as '-2.0 ', DecimalToken.new(-2)\n end", "def decimal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 5)\n\n type = DECIMAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 25:5: ( '-' )? '1' .. '9' ( '0' .. '9' )*\n # at line 25:5: ( '-' )?\n alt_9 = 2\n look_9_0 = @input.peek(1)\n\n if (look_9_0 == ?-) \n alt_9 = 1\n end\n case alt_9\n when 1\n # at line 25:5: '-'\n match(?-)\n\n end\n match_range(?1, ?9)\n # at line 25:20: ( '0' .. '9' )*\n loop do #loop 10\n alt_10 = 2\n look_10_0 = @input.peek(1)\n\n if (look_10_0.between?(?0, ?9)) \n alt_10 = 1\n\n end\n case alt_10\n when 1\n # at line 25:21: '0' .. 
'9'\n match_range(?0, ?9)\n\n else\n break #loop 10\n end\n end\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 5)\n\n end", "def DISABLED_test_single_decimal\n assert_parses_to [DecimalToken.new(-1)],\n ExpressionNode.new(\n TermNode.new(\n FactorNode.new(\n BaseNode.new(\n DecimalToken.new(-1))),\n TermPrimeNode.new),\n ExpressionPrimeNode.new)\n end", "def decimal_part(digits: 10)\n num = ''\n if digits > 1\n num = non_zero_digit\n digits -= 1\n end\n leading_zero_number(digits: digits) + num.to_s\n end", "def DISABLED_test_decimal_multiply_sequence\n assert_tokenises_as '2.*2.0', DecimalToken.new(2), MultiplyOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0*-2.', DecimalToken.new(2), MultiplyOpToken.instance, DecimalToken.new(-2)\n end", "def decimal_limit; end", "def decimal(s)\n BigDecimal.new(s)\n end", "def to_decimal\n octal_string =~ INVALID_OCTAL ? 0 : calculate\n end", "def decimal_mark; end", "def DISABLED_test_decimal_left_paren_sequence\n assert_tokenises_as '2.(2.0', DecimalToken.new(2), LeftParenthesisToken.instance, DecimalToken.new(2)\n assert_tokenises_as '-2.0(-2.', DecimalToken.new(-2), LeftParenthesisToken.instance, DecimalToken.new(-2)\n end", "def tokenize_exponent_literal\n advance # Pass the e/E\n advance if cchar == '+' or cchar == '-'\n advance until( /[0-9]/.match( cchar ).nil? )\n capture_token( :exp_literal )\n end", "def toDecimal\r\n self\r\n end", "def type_literal_generic_bigdecimal(column)\n type_literal_generic_numeric(column)\n end", "def parse_numeric_constant\n if peek?(:LIT_INT)\n ExprInt.new(expect(:LIT_INT))\n else\n ExprFloat.new(expect(:LIT_FLOAT))\n end\n end", "def test_decimals\n assert_equal( \"+(+(1., 2.2), 3.325)\" , @p.prefix_form(\"1.+2.2+3.325\") )\n assert_equal( \"+(0.0, .2)\" , @p.prefix_form(\"0.0+.2\") )\n assert_raise(ArgumentError) { @p.prefix_form(\".+1\") }\n end", "def DISABLED_test_decimal_minus_sequence\n assert_tokenises_as '2.-2.0', DecimalToken.new(2), SubtractOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0--2.', DecimalToken.new(2), SubtractOpToken.instance, DecimalToken.new(-2)\n end", "def exponent_part\n if (code = @codes[@pos]) == 0x2b\n @pos += 1\n elsif code == 0x2d\n @pos += 1\n neg = true\n end\n d = decimal_digits\n raise ParseError.new(\"unexpecting token\", self) if d.nil?\n if neg\n e = \"-#{d}\"\n else\n e = d\n end\n e\n end", "def DISABLED_test_decimal_right_paren_sequence\n assert_tokenises_as '2.)2.0', DecimalToken.new(2), RightParenthesisToken.instance, DecimalToken.new(2)\n assert_tokenises_as '-2.0)-2.', DecimalToken.new(-2), RightParenthesisToken.instance, DecimalToken.new(-2)\n end", "def DISABLED_test_several_numbers\n assert_tokenises_as '1 -2 3. 
-4.0',\n IntegerToken.new(1),\n IntegerToken.new(-2),\n DecimalToken.new(3),\n DecimalToken.new(-4)\n end", "def bigdecimal(n)\n return object(n.to_f) #, :as => :bigdecimal\n end", "def DISABLED_test_integer_exponent_sequence\n assert_tokenises_as '2^2', IntegerToken.new(2), ExponentOpToken.instance, IntegerToken.new(2)\n assert_tokenises_as '2^-2', IntegerToken.new(2), ExponentOpToken.instance, IntegerToken.new(-2)\n end", "def octal(digits)\n string(OCTAL_DIGITS, digits)\n end", "def decimal_places; end", "def type\n 'Edm.Decimal'\n end", "def DISABLED_test_decimal_divide_sequence\n assert_tokenises_as '2./2.0', DecimalToken.new(2), DivideOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0/-2.', DecimalToken.new(2), DivideOpToken.instance, DecimalToken.new(-2)\n end", "def formatted_number(decimal)\n decimal.truncate(NUM_DECIMAL_PLACES).to_s(FLOATING_POINT_NOTATION)\n end", "def decimals; num_dec; end", "def xtest_int_literal_big\n check(C::IntLiteral, <<-EOS)\n |10000000000\n EOS\n end", "def decimal\n @decimal ||= implied_probability.decimal\n end", "def decimal\n @decimal ||= implied_probability.decimal\n end", "def initialize(_val, _precs=0)\n # MRI appears to ignore the precision argument\n v = _val.strip\n first_ch = v[0]\n sgn = 1\n if first_ch._equal?( ?+ )\n first_ch = v[1]\n elsif first_ch._equal?( ?- )\n first_ch = v[1]\n sgn = -1\n end\n if first_ch._equal?( ?N ) && v == \"NaN\"\n self.__init_nan\n return\n elsif first_ch._equal?( ?I ) && v =~ /^[-+]?Infinity$/\n self.__init_infinity(sgn)\n return\n end\n @sign = sgn\n @special = 0\n v = v.__delete_underscore\n m = /^\\s*(([-+]?)(\\d*)(?:\\.(\\d*))?(?:[EeDd]([-+]?\\d+))?).*$/.match(v)\n mant = 0\n expon = 0\n ndigits = 0\n if m._not_equal?(nil) # [\n @sign = sgn\n i_cls = Integer\n frac_str = m[4]\n if frac_str._equal?(nil)\n frac = 0\n nd_frac = 0\n else\n frlen = frac_str.length\n nd_frac = frlen\n if frlen._not_equal?(0)\n # strip trailing zeros from fraction\n fend_idx = frlen - 1\n while fend_idx > 0 && frac_str[fend_idx]._equal?( ?0 )\n fend_idx -= 1\n end\n frlen = fend_idx + 1\n end\n frac_prefix_str = frac_str[0, frlen] # without trailing zeros\n frac = i_cls.__from_string_radix( frac_prefix_str , 10)\n end\n int_str = m[3]\n nd_int = int_str.length\n if nd_int._equal?(0)\n int = 0\n elsif int_str[0]._equal?( ?0 )\n int = i_cls.__from_string_radix( int_str, 10 ) # leading zero digit\n else\n j = nd_int - 1\n if frac_str._equal?(nil)\n # strip trailing zeros off integer to reduce chance of integer overflow\n while int_str[j]._equal?( ?0 ) and j > 0\n expon += 1\n j -= 1\n end\n end\n int = i_cls.__from_string_radix( int_str[0, j+1], 10 )\n end\n exp_str = m[5]\n expon += exp_str._equal?(nil) ? 
0 : i_cls.__from_string_radix( exp_str, 10 )\n if int == 0 && frac != 0\n expon -= frlen # adjust for decimal point at rhs of internal digits\n # adjust precision for number of leading zeros in fraction\n fidx = 0\n while fidx < frlen && frac_str[fidx]._equal?( ?0 )\n fidx += 1\n nd_frac -= 1\n end\n elsif frac_prefix_str._not_equal?(nil)\n # multiply int by 10**frac_prefix_str.length\n count = frac_prefix_str.length\n int = __multiply_by_tenpower(int, count)\n expon -= count\n end\n mant = int + frac\n end # ]\n # MRI appears to ignore precision arg to new and add about 17 ...\n if nd_frac._equal?(0)\n @precs = UNLIM_PRECISION\n else\n @precs = nd_frac + nd_int + 17\n end\n @digits = mant\n unless expon._isFixnum\n raise FloatDomainError, 'exponent of a BigDecimal exceeds Fixnum range'\n end\n @exp = expon\n end", "def to_decimal(float_price)\n decimal.Decimal('%<float_price>.2f', float_price: float_price)\nend", "def to_ps_money\n return nil if self.nil?\n return \"00\" if self.to_f == 0\n \n value = self.to_s\n\n # obtem a parte fracionaria e transforma em string.\n frac = value.to_f - value.to_i\n frac = frac.to_s + \"0\" \n frac = frac[2..3]\n # Se tiver parte inteira, concatena com a parte fracionaria\n inteiro = \"\"\n inteiro = value.to_i.to_s if value.to_f.truncate > 0\n inteiro + frac\n end", "def e_built_in(number_of_decimals)\n\tprintf(\"%.#{number_of_decimals}f\", E(number_of_decimals))\n\tprintf \"\\n\"\nend", "def test_native_decimal_insert_manual_vs_automatic\n correct_value = '0012345678901234567890.0123456789'.to_d\n\n Person.delete_all\n Person.connection.add_column \"people\", \"wealth\", :decimal, :precision => '30', :scale => '10'\n Person.reset_column_information\n\n # Do a manual insertion\n if current_adapter?(:OracleAdapter)\n Person.connection.execute \"insert into people (id, wealth) values (people_seq.nextval, 12345678901234567890.0123456789)\"\n elsif current_adapter?(:OpenBaseAdapter) || (current_adapter?(:MysqlAdapter) && Mysql.client_version < 50003) #before mysql 5.0.3 decimals stored as strings\n Person.connection.execute \"insert into people (wealth) values ('12345678901234567890.0123456789')\"\n else\n Person.connection.execute \"insert into people (wealth) values (12345678901234567890.0123456789)\"\n end\n\n # SELECT\n row = Person.find(:first)\n assert_kind_of BigDecimal, row.wealth\n\n # If this assert fails, that means the SELECT is broken!\n unless current_adapter?(:SQLite3Adapter)\n assert_equal correct_value, row.wealth\n end\n\n # Reset to old state\n Person.delete_all\n\n # Now use the Rails insertion\n assert_nothing_raised { Person.create :wealth => BigDecimal.new(\"12345678901234567890.0123456789\") }\n\n # SELECT\n row = Person.find(:first)\n assert_kind_of BigDecimal, row.wealth\n\n # If these asserts fail, that means the INSERT (create function, or cast to SQL) is broken!\n unless current_adapter?(:SQLite3Adapter)\n assert_equal correct_value, row.wealth\n end\n\n # Reset to old state\n Person.connection.del_column \"people\", \"wealth\" rescue nil\n Person.reset_column_information\n end", "def decimal_digits\n pos0 = @pos\n if (code = @codes[@pos]) >= 0x30 and code <= 0x39\n @pos += 1\n while code = @codes[@pos] and code >= 0x30 and code <= 0x39\n @pos += 1\n end\n return @codes[pos0...@pos].pack(\"U*\")\n else\n nil\n end\n end", "def exp10\n Double.new(10**self.to_f)\n end", "def tokenize_float_literal\n advance # Pass the .\n\n until( /[0-9eE]/.match( cchar ).nil? 
)\n if cchar == 'e' || cchar == 'E'\n return tokenize_exponent_literal\n end\n advance\n end\n capture_token( :float_literal )\n end", "def octal_integer_literal\n code = @codes[@pos]\n if code.nil?\n return nil\n elsif code == 0x30 and (code1 = @codes[@pos + 1]) >= 0x30 and code1 <= 0x37\n @pos += 1\n pos0 = @pos\n while code = @codes[@pos] and code >= 0x30 and code <= 0x37\n @pos += 1\n end\n if identifier_start?(code)\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n else\n return ECMA262::ECMA262Numeric.new(@codes[pos0...@pos].pack(\"U*\").to_i(8))\n end\n else\n nil\n end\n end", "def bigdecimal\n Util.from_bytes :bigdecimal, value\n end", "def number(digits: T.unsafe(nil)); end", "def testPositiveDots\n assert_raise( RangeError ) {\n dm = DotManager.new( 0, 0, 100, 1, 99 )\n }\n end", "def exp(number, exponent)\n number * (10**exponent)\nend", "def toStringDecimal(prec = 100, arr = toDigArray(prec))\n string = arr.shift.to_s\n string += \".\"\n arr.each{|x| string += x.to_s}\n string\n end", "def decimal_total\n sprintf(\"%03d\", total).insert(-3, \".\")\n end", "def typecast_value_decimal(value)\n if value.is_a?(String)\n BigDecimal.new(value)\n else\n super\n end\n end", "def numerify(number_string, leading_zero: T.unsafe(nil)); end", "def octal_val(decimal)\n octal = 0\n i = 0\n num = decimal\n\n loop do\n break if num <= 0\n octal += (num.divmod(8)[1] * (10 ** i))\n num = num.divmod(8)[0]\n i += 1\n end\n\n octal\nend", "def set_exponent(exp)\n\t\tcase \n\t\twhen (exp%3 == 1 and exp > 3)\n\t\t\t@@english_numeral += \"ten #{EXPONENTS[exp]}\" + ' ' \n\n\t\twhen (exp%3 == 2 and exp > 3)\n\t\t\t@@english_numeral += \"hundred #{EXPONENTS[exp]}\" + ' '\n\n\t\telse\n\t\t\t@@english_numeral += \"#{EXPONENTS[exp]}\" + ' '\n\n\t\tend\n\t\t\t\n\tend", "def decimal_places=(decimal_places)\n if !decimal_places.nil? && decimal_places < 0\n fail ArgumentError, 'invalid value for \"decimal_places\", must be greater than or equal to 0.'\n end\n\n @decimal_places = decimal_places\n end", "def type_literal_generic_fixnum(column)\n type_literal_generic_integer(column)\n end", "def type_literal_generic_bignum_symbol(column)\n :bigint\n end", "def decimal(digits, base)\n e = digits.size - 1\n v = 0\n digits.each do |n|\n v += n.to_i * base**e\n e -= 1\n end\n v\n end", "def eround(decimal_points = 0)\n (\"%.#{decimal_points}e\" % self).to_f\n end", "def conversion_precision=(_arg0); end", "def has_decimal_part?()\n #This is a stub, used for indexing\n end", "def prec_i() end", "def to_numeric(anything)\n\n num = BigDecimal.new(anything.to_s)\n if num.frac == 0\n num.to_i\n else\n num.to_r\n end\n end", "def typecast_value_decimal(value)\n case value\n when BigDecimal\n value\n when Numeric\n BigDecimal.new(value.to_s)\n when String\n _typecast_value_string_to_decimal(value)\n else\n raise InvalidValue, \"invalid value for BigDecimal: #{value.inspect}\"\n end\n end", "def to_decimal\n @value.digits.map.with_index do |current, index|\n current * (3 ** index)\n end.sum\n end", "def decimal_places_suffix\n \"\"\n end", "def numeric(key, options = {})\n before_all(key, options)\n match?(key, /\\A([+-]?\\d+)([,.](\\d+))?\\Z/) ? store_decimal(key, ->(item){item.to_s.sub(/,/, \".\").to_f}, options) : raise_type_error(key, \"Numeric\")\n end", "def *(other)\n RDF::Literal(to_d * (other.respond_to?(:to_d) ? 
other.to_d : BigDecimal(other.to_s)))\n end", "def DISABLED_test_integer_plus_sequence\n assert_tokenises_as '2+2', IntegerToken.new(2), AddOpToken.instance, IntegerToken.new(2)\n assert_tokenises_as '2+-2', IntegerToken.new(2), AddOpToken.instance, IntegerToken.new(-2)\n end", "def conversion_precision; end", "def write_num_lit(data)\n write_num_base('c:numLit', data)\n end", "def to_decimal\n return @bin.each_char.inject(0) { |sum, digit| sum * 2 + digit.to_i }\n end", "def test_regexp_number_separator\n assert_equal '1.234', @TS_Regexp.format(1234, '.')\n end", "def round_prec(digits)\n #This is a stub, used for indexing\n end", "def check_for_valid_decimal(decimal_param)\n /\\d{1,2},\\d{1,2}/ === decimal_param\n end", "def number\n result = ''\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n\n if @current_char == '.'\n result << @current_char\n advance\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n Token.new(:real_const, result.to_f)\n else\n Token.new(:integer_const, result.to_i)\n end\n end", "def exponent!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 3)\n\n \n # - - - - main rule block - - - -\n # at line 17:5: ( 'e' | 'E' ) ( '-' )? DECIMAL\n if @input.peek(1) == ?E || @input.peek(1) == ?e\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n # at line 17:15: ( '-' )?\n alt_6 = 2\n look_6_0 = @input.peek(1)\n\n if (look_6_0 == ?-) \n alt_6 = 1\n end\n case alt_6\n when 1\n # at line 17:15: '-'\n match(?-)\n\n end\n decimal!\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 3)\n\n end", "def DISABLED_test_exponent_token\n assert_tokenises_as '^', ExponentOpToken.instance\n assert_tokenises_as ' ^', ExponentOpToken.instance\n assert_tokenises_as ' ^ ', ExponentOpToken.instance\n end", "def typecast_to_bigdecimal(value)\n typecast_to_numeric(value, :to_d)\n end", "def dig(dig,num)\n\tnum/dig%10\nend", "def initialize(digits)\n @value = digits.to_s.to_i\n end", "def decimals(n, rounded=true)\n digits 10, n, rounded\n end", "def power_from_db(decible)\r\n 10 ** (decible / 10.0)\r\n end", "def number_with_precision(number, precision=3)\n \"%01.#{precision}f\" % number\n rescue\n number\n end", "def literal_integer(v)\n if v > 9223372036854775807 || v < -9223372036854775808\n literal_integer_outside_bigint_range(v)\n else\n v.to_s\n end\n end", "def to_d\n BigDecimal(cents.to_s) / 10 ** precision\n end", "def float_dec(x)\n Numerals::Format[:free, :exact_input].write(x)\nend", "def delimited_number(value, symbol: '', delimiter: ',', no_decimals: 2)\n val = value.nil? ? 0.0 : value\n parts = format(\"#{symbol}%.#{no_decimals}f\", val).split('.')\n parts[0] = parts.first.reverse.gsub(/([0-9]{3}(?=([0-9])))/, \"\\\\1#{delimiter}\").reverse\n parts.join('.')\n end", "def read_number(token)\n current = @marker.character\n is_float = current == ?.\n is_exponent = false\n token.kind = is_float ? :float_lit : :integer_lit\n\n while (current = peek_next())\n case current\n # Float lit\n when ?.\n break if is_float == true\n is_float = true\n token.kind = :float_lit\n read_next()\n\n # Digit\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_next()\n\n # Exponent\n when ?e, ?E\n if is_exponent\n token.kind = :invalid\n raise_error(:duplicate_exponent,\n \"Malformed number literal: exponent already provided\")\n end\n\n is_exponent = true\n token.kind = is_float ? 
:float_exp_lit : :integer_exp_lit\n\n read_next()\n current = read_next()\n current = read_next() if current == ?- || current == ?+\n\n if current < ?0 || current > ?9\n raise_error(:malformed_exponent, \"Malformed number literal: exponent expected but not provided\")\n end\n\n else break\n end\n end\n\n token.value = @source[(token.from .. @marker.source_index)]\n end", "def prec(*) end", "def leading_zero_number(digits: T.unsafe(nil)); end", "def lex_en_expr_dot; end", "def lex_en_expr_dot; end", "def lex_en_expr_dot; end", "def test_IntegerLiterals_sample02\n assert_equal(\"Fixnum\", 1_000_000_000.class.to_s)\n end", "def type_literal_generic_float(column)\n :\"double precision\"\n end", "def test_float_literal\n check(C::FloatLiteral, <<-EOS)\n |1.0\n EOS\n end", "def apply_validations_for_decimal\n apply_validations_for_float\n end", "def assert_numeric(value, pos)\n if value =~ /^0[xX]/\n lex_error(Issues::INVALID_HEX_NUMBER, {:value => value}, pos) unless value =~ /^0[xX][0-9A-Fa-f]+$/\n\n elsif value =~ /^0[^.]/\n lex_error(Issues::INVALID_OCTAL_NUMBER, {:value => value}, pos) unless value =~ /^0[0-7]+$/\n\n elsif value =~ /^\\d+[eE.]/\n lex_error(Issues::INVALID_DECIMAL_NUMBER, {:value => value}, pos) unless value =~ /^\\d+(?:\\.\\d+)?(?:[eE]-?\\d+)?$/\n\n else\n lex_error(Issues::ILLEGAL_NUMBER, {:value => value}, pos) unless value =~ /^\\d+$/\n end\n end" ]
[ "0.6437505", "0.64167", "0.6341209", "0.6310946", "0.62470144", "0.62351096", "0.61921334", "0.6191976", "0.6112107", "0.5943463", "0.59008086", "0.58140814", "0.5803592", "0.5799678", "0.57473916", "0.5744536", "0.5724994", "0.56137663", "0.55923784", "0.55648774", "0.5545756", "0.5510883", "0.5500636", "0.548524", "0.54579836", "0.5417597", "0.54090476", "0.53645414", "0.53611606", "0.5335505", "0.53017867", "0.5300852", "0.5300175", "0.528121", "0.52539927", "0.52539927", "0.5245036", "0.5229178", "0.5224778", "0.5210063", "0.51881075", "0.51818925", "0.51729065", "0.5160486", "0.5153822", "0.5146199", "0.51444584", "0.51200217", "0.51074535", "0.5105678", "0.50745", "0.50724494", "0.5070435", "0.50653183", "0.5047621", "0.5040042", "0.5032545", "0.50295246", "0.50189805", "0.5013559", "0.49619132", "0.4954978", "0.49521902", "0.4940745", "0.4934669", "0.49302074", "0.49293476", "0.4918711", "0.4912107", "0.49115446", "0.48953626", "0.48917526", "0.48877913", "0.488163", "0.48787245", "0.48781362", "0.48750907", "0.48704043", "0.48603112", "0.48517212", "0.48418173", "0.48410696", "0.4840963", "0.484051", "0.4833884", "0.48337638", "0.48187134", "0.48180616", "0.4817727", "0.48157212", "0.48155436", "0.48142606", "0.48059434", "0.48059434", "0.48059434", "0.48048863", "0.4801401", "0.47953784", "0.47806942", "0.4779954" ]
0.67425597
0
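The record above pairs the 7.8.3 DecimalLiteral grammar with the minjs method that walks it by hand: split the literal into an integer part i, an optional fraction f, and an optional exponent e, hand the triple to ECMA262::ECMA262Numeric.new, and back off when a lone '.' turns out to be the period punctuator rather than the start of a fraction. The same three-way split can be stated declaratively. This sketch is for comparison only: DECIMAL_LITERAL and split_decimal_literal are names invented here, and unlike the lexer it neither rejects a literal followed by an IdentifierStart nor handles the punctuator fallback:

    # 7.8.3  DecimalLiteral ::
    #   DecimalIntegerLiteral . DecimalDigits_opt ExponentPart_opt
    #   | . DecimalDigits ExponentPart_opt
    #   | DecimalIntegerLiteral ExponentPart_opt
    DECIMAL_LITERAL = /\A
      (?: (0|[1-9][0-9]*) (?:\.([0-9]*))?   # integer part, optional fraction
        | \.([0-9]+)                        # fraction only
      )
      (?:[eE]([+-]?[0-9]+))?                # optional ExponentPart
    /x

    def split_decimal_literal(src)
      m = DECIMAL_LITERAL.match(src) or return nil
      { i: m[1] || "0", f: m[2] || m[3], e: m[4] }
    end

    split_decimal_literal("3.14e-2")  #=> {:i=>"3", :f=>"14", :e=>"-2"}
    split_decimal_literal(".5")       #=> {:i=>"0", :f=>"5", :e=>nil}

The i/f/e triple mirrors the three arguments the lexer passes to ECMA262Numeric, and the [eE]([+-]?[0-9]+) group is exactly the ExponentIndicator SignedInteger shape named in the next record's query.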
7.8.3 ExponentPart :: ExponentIndicator SignedInteger
def exponent_part if (code = @codes[@pos]) == 0x2b @pos += 1 elsif code == 0x2d @pos += 1 neg = true end d = decimal_digits raise ParseError.new("unexpecting token", self) if d.nil? if neg e = "-#{d}" else e = d end e end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exponent\n e = @exp\n digs = @digits\n unless digs == 0\n e += digs.__decimal_digits_length_approx(false)\n end\n e\n end", "def exponent; end", "def exp(number, exponent)\n number * (10**exponent)\nend", "def exponent\n return @exp\n end", "def exponent\n Math.log10(subunit_to_unit).round\n end", "def DISABLED_test_integer_exponent_sequence\n assert_tokenises_as '2^2', IntegerToken.new(2), ExponentOpToken.instance, IntegerToken.new(2)\n assert_tokenises_as '2^-2', IntegerToken.new(2), ExponentOpToken.instance, IntegerToken.new(-2)\n end", "def float_from_integral_significand_exponent(s,e)\n Float.context.Num(s,e)\nend", "def tokenize_exponent_literal\n advance # Pass the e/E\n advance if cchar == '+' or cchar == '-'\n advance until( /[0-9]/.match( cchar ).nil? )\n capture_token( :exp_literal )\n end", "def exponent_v1(base, power)\n return 1 if power <= 0\n base * exponent_v1(base, power-1)\nend", "def DISABLED_test_decimal_exponent_sequence\n assert_tokenises_as '2.^2.0', DecimalToken.new(2), ExponentOpToken.instance, DecimalToken.new(2)\n assert_tokenises_as '2.0^-2.', DecimalToken.new(2), ExponentOpToken.instance, DecimalToken.new(-2)\n end", "def exponent(a=1, x)\n\ta*(Math::E**x)\nend", "def exp(base, exponent)\n return 1 if exponent.zero?\n\n base * exp(base, exponent - 1)\nend", "def ins_find_initial_exponent(nbits)\n nbits = nbits / 2 + 2 while nbits > 53\n nbits\nend", "def exponent!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 3)\n\n \n # - - - - main rule block - - - -\n # at line 17:5: ( 'e' | 'E' ) ( '-' )? DECIMAL\n if @input.peek(1) == ?E || @input.peek(1) == ?e\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n # at line 17:15: ( '-' )?\n alt_6 = 2\n look_6_0 = @input.peek(1)\n\n if (look_6_0 == ?-) \n alt_6 = 1\n end\n case alt_6\n when 1\n # at line 17:15: '-'\n match(?-)\n\n end\n decimal!\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 3)\n\n end", "def exp(base, exponent)\n return base if exponent == 1\n base * exp(base, exponent-1)\nend", "def calculate_exponent(num, exp)\n\treturn num ** exp\nend", "def float_to_integral_significand_exponent(x)\n Float.context.to_int_scale(x)\nend", "def power_sig_exp(b, x)\n l10 = x / Math.log(10, b)\n log10_sig_exp(l10)\nend", "def exponent1(base, power)\n\treturn 1 if power == 0\n\tbase * exponent1(base, power - 1)\nend", "def exponent(b, n)\n\nend", "def exponent(b, n)\n\nend", "def exponent(b, n)\n\nend", "def exponent_1(base, power)\n\treturn 1 if power == 0\n\tlesser_power = power - 1\n\tbase_to_the_lesser_power = exponent_1(base, lesser_power)\n\tbase * base_to_the_lesser_power\nend", "def set_exponent(exp)\n\t\tcase \n\t\twhen (exp%3 == 1 and exp > 3)\n\t\t\t@@english_numeral += \"ten #{EXPONENTS[exp]}\" + ' ' \n\n\t\twhen (exp%3 == 2 and exp > 3)\n\t\t\t@@english_numeral += \"hundred #{EXPONENTS[exp]}\" + ' '\n\n\t\telse\n\t\t\t@@english_numeral += \"#{EXPONENTS[exp]}\" + ' '\n\n\t\tend\n\t\t\t\n\tend", "def exp(char, power)\n toReturn = char\n \n for i in 2..power\n toReturn = multiply([toReturn, char])\n end\n \n toReturn\n end", "def rec_exp_1(base, power)\nend", "def exponent!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 37 )\n\n \n # - - - - main rule block - - - -\n # at line 361:12: ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. 
'9' )+\n if @input.peek(1) == 0x45 || @input.peek(1) == 0x65\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n # at line 361:22: ( '+' | '-' )?\n alt_14 = 2\n look_14_0 = @input.peek( 1 )\n\n if ( look_14_0 == 0x2b || look_14_0 == 0x2d )\n alt_14 = 1\n end\n case alt_14\n when 1\n # at line \n if @input.peek(1) == 0x2b || @input.peek(1) == 0x2d\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n end\n # at file 361:33: ( '0' .. '9' )+\n match_count_15 = 0\n while true\n alt_15 = 2\n look_15_0 = @input.peek( 1 )\n\n if ( look_15_0.between?( 0x30, 0x39 ) )\n alt_15 = 1\n\n end\n case alt_15\n when 1\n # at line 361:34: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n else\n match_count_15 > 0 and break\n eee = EarlyExit(15)\n\n\n raise eee\n end\n match_count_15 += 1\n end\n\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 37 )\n\n end", "def exp(base,exponent)\n p exponent\n return 1 if exponent == 0\n return base if exponent == 1\n if exponent.even?\n result = exp(base,exponent/2)\n result * result\n else\n result = exp(base,(exponent-1)/2)\n base * (result * result)\n end\n\n\nend", "def exponent_bitvector(n)\n vec = 0\n if n < 0\n vec = 1\n n = -n\n end\n bit = 2\n ([2] + @ps).each do |p|\n e = false\n while n % p == 0\n n /= p\n e = !e\n end\n vec |= bit if e\n bit <<= 1\n end\n return vec, n\n end", "def power(base,exponent)\n\n return base ** exponent\nend", "def exponentiate(number, power)\n power = power.to_int\n puts \"#{number} ** #{power} = #{number ** power}\\n\"\nend", "def exponentiation(base, power)\n return 1 if power == 0\n base * exponentiation(base, power - 1)\nend", "def pow(base,exponent)\n base**exponent\n end", "def decimal_literal\n pos0 = @pos\n code = @codes[@pos]\n\n if code.nil?\n return nil\n elsif code == 0x2e #.\n @pos += 1\n f = decimal_digits\n if f.nil? 
#=> this period is punctuator\n @pos = pos0 + 1\n return ECMA262::PUNC_PERIOD\n end\n if (code = @codes[@pos]) == 0x65 || code == 0x45\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new('0', f, e)\n elsif code == 0x30 # zero\n i = \"0\"\n @pos += 1\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n elsif code >= 0x31 and code <= 0x39\n i = decimal_digits\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n end\n\n nil\n end", "def exp1(base, power)\n return 1 if power <= 0\n exp1(base, power - 1) * base\nend", "def sci_not(num)\n\n return \"#{num}.00E0\" if num < 10\n\n num_str = num.to_s\n power = num_str.length - 1 #Calculates the exponential\n\n digi_float = num / (10**power).to_f\n two_deci = digi_float.round(2).to_s #Rounds of the number to 2 decimal\n\n two_deci[2].nil? ? tenth_place = 0 : tenth_place = two_deci[2]\n two_deci[3].nil? ? unit_place = 0 : unit_place = two_deci[3]\n\n\n puts \"#{num_str[0]}.#{tenth_place}#{unit_place}E#{power.to_s}\" #Final desired output\n\nend", "def number_to_scientific(num,precision=1)\n numberString=\"%.#{precision}e\" % num\n splitString=numberString.split(\"e\")\n exponent=splitString[1].to_i.to_s\n html=splitString[0]+\" x 10<sup>\"+exponent+\"</sup>\"\n return html.html_safe\n end", "def exp(base, power)\n return 1 if power == 0\n return base if power == 1\n base * exp(base, (power-1))\nend", "def rec_exp_2(base, power)\nend", "def power(base, exponent)\n return nil if exponent < 0\n\n return 1 if exponent == 0\n\n value = base\n\n (exponent - 1).times do value *= base end\n\n value\nend", "def exp10\n Double.new(10**self.to_f)\n end", "def exp_for_level(level)\n return (DND::EXP_FOR_LEVEL[level] * 1000).to_i\n end", "def exponents\n\tputs \"what number would you like raise and to what power do you want to raise it?\"\n\texp_number = gets.chomp\n\tpower = gets.chomp\n\tif power.to_i > 15\n\t\tputs \"Okay, let's be real, I'm not THAT smart. 
What're you trying to break me?\"\n\telse \n\t\tputs exp_number.to_f ** power.to_f\n\tend\nend", "def exponential(value1)\n result = Math.exp(value1)\n puts \"The result is #{ result }\"\nend", "def decimal_part(digits: 10)\n num = ''\n if digits > 1\n num = non_zero_digit\n digits -= 1\n end\n leading_zero_number(digits: digits) + num.to_s\n end", "def exponentiation(base, exp)\n return nil if exp < 0\n return 1 if exp == 0\n return base if exp == 1\n\n if exp.even?\n (exponentiation(base, exp / 2)**2)\n else\n base * (exponentiation(base, (exp - 1) / 2)**2)\n end\nend", "def scientificNotation(num)\r\n data = \"%.16e\" % num\r\n result = (data.split(\"e+\")[1].to_i() > 20) ? (data): (num)\r\n return result\r\n end", "def power(bas,exponent)\n i = 1\n resultat = bas\n while i < exponent\n resultat *= bas\n i += 1\n end\n return resultat\nend", "def xnum(data)\n data.unpack('E').first\n end", "def exp1(base, power)\n return 1 if power == 0\n base * exp1(base, power - 1)\nend", "def power_level(x, y)\n ((((x + 10) * y ) + $serial_num) * (x + 10)).digits[2] - 5\nend", "def power_level(x, y)\n ((((x + 10) * y ) + $serial_num) * (x + 10)).digits[2] - 5\nend", "def exp1(base, power)\n return 1 if power == 0\n base * exp1(base, power - 1)\nend", "def split_number(number)\n coefficient, exponent = Kernel.format(\"%e\", number).split(\"e\")\n\n [coefficient.to_f, exponent.to_i]\n end", "def exponent1(base, num)\n return 1 if num == 0\n return base if num == 1\n x = base * exponent1(base, num-1)\n\nend", "def exp1(num, exponent)\n return 1 if exponent == 0\n # return ( 1 / exp1(num, exponent - 1) )\n\n num * exp1(num, exponent - 1)\nend", "def power(base, exponent)\n exponent <= 1 ? base : base * (power base, (exponent - 1))\nend", "def DISABLED_test_exponent_token\n assert_tokenises_as '^', ExponentOpToken.instance\n assert_tokenises_as ' ^', ExponentOpToken.instance\n assert_tokenises_as ' ^ ', ExponentOpToken.instance\n end", "def decimal_part(digits: T.unsafe(nil)); end", "def power(base, exponent)\n if !base.is_a?(Integer) || !exponent.is_a?(Integer)\n return nil\n else \n product = 1\n (1..exponent).each do\n product *= base\n end\n return product\n end\nend", "def power(base,exponent)\n i = 0\n output = 1\n while i < exponent\n output = output * base\n i += 1\n end\n return output\nend", "def sbc_a_e\n end", "def decode_int\n # @index is at the position of the the 'i' so we just need everything between it\n # and the next appearance of 'e'.\n index_of_last_digit = self[@index..self.length - 1].index 'e'\n number_string = self[(@index + 1)..(@index + index_of_last_digit - 1)]\n @index += index_of_last_digit\n number_string.to_i\n end", "def exponentiation_one(base, exp)\n return nil if exp < 0\n return 1 if exp == 0\n return base if exp == 1\n\n base * exponentiation_one(base, exp - 1)\nend", "def exp_v_1(num, pow)\n return 1 if pow === 0\n return num * exp_v_1(num, pow - 1)\nend", "def derive(coefficient, exponent)\n result1 = coefficient * exponent\n result2 = result1.to_s\n result2\n result3 = result2 + 'x'\n result3\n result4 = result3 + \"^\"\n result4\n new_exponent = exponent - 1\n exponent_converted = new_exponent.to_s\n result5 = result4 + exponent_converted\n end", "def power(base,exponent)\n i=1\n output = base\n while i<exponent\n output = output*base\n i += 1\n end\n return output\nend", "def exponent1(base, power)\n # if power equals zero , then 1 else (base * method(base,power - 1))\n if power == 0\n return 1\n else\n (base * exponent1(base, power - 1))\n end\nend", "def 
power_digit_sum(base, exponent)\n\t(2 ** 1000).to_s.split(\"\").inject(0) {|sum, n| sum + n.to_i}\nend", "def power(base, exponent)\n total = 1\n\n # we don't actually need this\n # if exponent == 1\n # total = base\n # elsif exponent == 0\n # total = 1\n # end\n # But what if the exponent were negative?\n\n (exponent.abs).times do\n total = multiply(total, base)\n end\n\n if exponent < 0\n total = (1.0 / total).to_r\n end\n\n total\nend", "def power(base, exponent)\n result = 1\n exponent.times { result *= base }\n result\nend", "def to_i(base=10) end", "def power(number, exponent)\n i = 0\n output = 1\n new_exponent = exponent.round\n if new_exponent < 0\n output = 1/power(number,(new_exponent * -1)).to_f\n return output\n end\n if new_exponent == 0\n return output\n end\n while i < new_exponent\n output = output * number\n i += 1\n end\n return output\nend", "def exp_r1 (base, exponent)\n if exponent == 0\n return 1\n else\n base * exp_r1(base, exponent - 1)\n end\nend", "def log10_sig_exp(l10)\n exp = l10.floor\n sig = 10**(l10 - exp)\n [sig, exp]\nend", "def exponent(a, b)\n return 1 if b.zero?\n\n array_of_values = []\n\n b.abs.times do\n array_of_values.push(a)\n end\n\n calculated_multiplier = array_of_values.reduce(:*)\n\n b > 0 ? calculated_multiplier : \"1/#{calculated_multiplier}\"\nend", "def exp1(base, num)\n return 1 if num == 0\n return base if num == 1\n expo = base * exp1(base, num-1)\nend", "def power(base, exp)\n return base ** exp\nend", "def get_coeffs\n (\"%0#{DIGITS}d\" % [self]).chars.map(&:to_i)\n end", "def exponentm(num_1,num_2)\n return num_1 ** num_2\nend", "def xform(value)\n value.to_i - 1 \n end", "def dec_e\n end", "def get_decoding_exponent(display_exponent = 2.2)\n gamma = get_data\n return 1.0/(gamma * display_exponent)\n end", "def expo(x) ([x].pack(\"G\")[0..1].unpack(\"S>\")[0] & 0b0111111111110000) >> 4; end", "def exp(n, i)\n n ** i\nend", "def exp(v); Math.exp(v); end", "def power(num, exponent)\n result = num\n (2..exponent).each { result = multiply(result, num) }\n result\nend", "def convert_base(number, base)\n \n exponents = []\n n = 0 # Exponent / number position\n\n # Find the appropriate exponent size larger than the number \n while number != 0\n # puts \"Current top number: #{number}\"\n # puts \"Current exponent value: #{n}\"\n running_total = 0\n if number < base ** n\n # puts \"Base ** n: #{base ** n}\" \n # puts \"If loop current number: #{number}\"\n running_total += base ** (n-1)\n # puts \"If loop running_total: #{running_total}\"\n number = number - running_total\n # puts \"If loop number after subtraction: #{number}\"\n if exponents[n-1].nil? 
then exponents[n-1] = 1 else exponents[n-1] += 1 end\n n = 0\n else\n n += 1\n end\n end\n\n exponents.inspect\n\nend", "def pow(base, exponent)\n\n result = 1\n\n exponent.times do\n result = result * base\n end\n\n return result\n \n end", "def powerdigitsum(exponent)\n product = 1\n sum = 0\n \n product = 2 ** exponent\n \n array = product.to_s.split(//)\n array.each do |el|\n sum += el.to_i\n end\n\n p sum\nend", "def power(base, exponent)\n total = 1\n\n exponent.times do\n total *= base\n end\n\n return total\nend", "def exp\n @exp ||= ComplexNumber.new((Math::E**real).round(15), 0) \\\n * ComplexNumber.new(\n Math.cos(imaginary).round(15),\n Math.sin(imaginary).round(15)\n )\n end", "def pow(base, exponent)\n\n result = 1\n exponent.times do \n result = base.to_i * result\n end\n result\nend", "def power (num, exponent)\n if exponent == 0\n result = 1\n elsif exponent == 1\n result = num\n else\n result = multiply(num, num)\n (exponent-2).times { result = multiply(result, num) }\n result\n end\nend", "def power(base, exponent)\n return (0...exponent).inject(1) { |memo| memo * base } if exponent >= 0\n (exponent...0).inject(1.0) { |memo| memo * 1/base }\nend", "def power(base, exp)\n i = 0\n output = 1\n\n while i < exp\n output *= base\n i += 1\n end\n return output\nend", "def exp_ver_two(base, exponent)\n return 1 if exponent.zero?\n\n if exponent.even?\n n = exp_ver_two(base, exponent / 2)\n n * n\n else\n n = exp_ver_two(base, (exponent - 1) / 2)\n base * (n * n)\n end\nend", "def exp1(b, e)\n return 1 if e == 0\n exp1(b, e - 1) * b\nend", "def power(base, exponent)\n i = \n output = 1\n while i <= exponent\n output = output * base\n i += 1\n end \n return output\nend", "def e_built_in(number_of_decimals)\n\tprintf(\"%.#{number_of_decimals}f\", E(number_of_decimals))\n\tprintf \"\\n\"\nend" ]
[ "0.6949241", "0.6768834", "0.6450049", "0.6366699", "0.62178034", "0.61935323", "0.6123252", "0.60276663", "0.6018495", "0.5966022", "0.59605044", "0.59280956", "0.5922054", "0.589215", "0.5840528", "0.58131653", "0.5787539", "0.57789415", "0.5742632", "0.5741708", "0.5741708", "0.5741708", "0.5723573", "0.5710017", "0.569473", "0.5649356", "0.5607009", "0.5606442", "0.5557676", "0.5539759", "0.55293316", "0.55217534", "0.549721", "0.5467548", "0.54646754", "0.5449089", "0.5434679", "0.54079294", "0.5395231", "0.539446", "0.5383491", "0.53624123", "0.53530633", "0.53451884", "0.53418154", "0.53402656", "0.5338926", "0.53368926", "0.53224385", "0.5315312", "0.53004", "0.53004", "0.52948076", "0.52898115", "0.5264934", "0.52616644", "0.52593535", "0.5248902", "0.52483684", "0.5244326", "0.52422154", "0.5239234", "0.5234297", "0.5218027", "0.5218026", "0.52119404", "0.52017903", "0.5189383", "0.5174006", "0.51569617", "0.5140002", "0.51365346", "0.5131694", "0.51313287", "0.5128506", "0.5123856", "0.5119584", "0.5113528", "0.51122653", "0.51078826", "0.50883156", "0.50852245", "0.50785613", "0.5076776", "0.5062556", "0.50592417", "0.5049122", "0.50478053", "0.50309837", "0.50087816", "0.5008021", "0.49937767", "0.49935424", "0.49935162", "0.49866223", "0.49847683", "0.49831137", "0.49731806", "0.4937332", "0.4928376" ]
0.7587559
0
7.8.3 DecimalDigit :: one of 0 1 2 3 4 5 6 7 8 9
def decimal_digits pos0 = @pos if (code = @codes[@pos]) >= 0x30 and code <= 0x39 @pos += 1 while code = @codes[@pos] and code >= 0x30 and code <= 0x39 @pos += 1 end return @codes[pos0...@pos].pack("U*") else nil end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decimal_part(digits: 10)\n num = ''\n if digits > 1\n num = non_zero_digit\n digits -= 1\n end\n leading_zero_number(digits: digits) + num.to_s\n end", "def non_zero_digit; end", "def to_digit\n return NUMBER[self] if self <= 9 && self >= 0\n NUMBER[0]\n end", "def decimal_part(digits: T.unsafe(nil)); end", "def double_digit(num)\n if num.to_s.length < 2\n \"0\" + num.to_s\n else\n num.to_s\n end\n end", "def to_decimal\n octal_string =~ INVALID_OCTAL ? 0 : calculate\n end", "def digit; end", "def check_digit\n dv\n end", "def two_digits_number(x)\n x < 10 ? \"0#{x}\" : x.to_s\n end", "def initialize(digits)\n @value = digits.to_s.to_i\n end", "def decimal(digits, base)\n e = digits.size - 1\n v = 0\n digits.each do |n|\n v += n.to_i * base**e\n e -= 1\n end\n v\n end", "def dig(dig,num)\n\tnum/dig%10\nend", "def to_i(base=10) end", "def to_decimal\n return @bin.each_char.inject(0) { |sum, digit| sum * 2 + digit.to_i }\n end", "def digits(text)\n return text.digits if text.is_a?(self)\n number = text.to_s.gsub(/\\D/,'')\n number = \"1#{number}\" if number.length == 10\n number\n end", "def decimal_literal\n pos0 = @pos\n code = @codes[@pos]\n\n if code.nil?\n return nil\n elsif code == 0x2e #.\n @pos += 1\n f = decimal_digits\n if f.nil? #=> this period is punctuator\n @pos = pos0 + 1\n return ECMA262::PUNC_PERIOD\n end\n if (code = @codes[@pos]) == 0x65 || code == 0x45\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new('0', f, e)\n elsif code == 0x30 # zero\n i = \"0\"\n @pos += 1\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n elsif code >= 0x31 and code <= 0x39\n i = decimal_digits\n if @codes[@pos] == 0x2e #.\n @pos += 1\n f = decimal_digits\n if (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n elsif (code = @codes[@pos]) == 0x65 || code == 0x45 #e or E\n @pos += 1\n e = exponent_part\n end\n if identifier_start?(@codes[@pos])\n raise ParseError.new(\"The source character immediately following a NumericLiteral must not be an IdentifierStart or DecimalDigit\", self)\n end\n\n return ECMA262::ECMA262Numeric.new(i, f, e)\n end\n\n nil\n end", "def octal(digits)\n string(OCTAL_DIGITS, digits)\n end", "def digits(arg)\n arg.to_s.gsub(/\\D+/,'')\n end", "def digitize(n)\n n.digits\nend", "def isDigit(c)\n ('0' .. '9').include?(c)\nend", "def set_last_digits\n end", "def value_of(digits, in_base:)\n number = 0\n\n digits.to_s.chars.each do |digit|\n number = in_base * number + CHARS.index(digit)\n end\n\n number\n end", "def decimal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 5)\n\n type = DECIMAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 25:5: ( '-' )? '1' .. '9' ( '0' .. 
'9' )*\n # at line 25:5: ( '-' )?\n alt_9 = 2\n look_9_0 = @input.peek(1)\n\n if (look_9_0 == ?-) \n alt_9 = 1\n end\n case alt_9\n when 1\n # at line 25:5: '-'\n match(?-)\n\n end\n match_range(?1, ?9)\n # at line 25:20: ( '0' .. '9' )*\n loop do #loop 10\n alt_10 = 2\n look_10_0 = @input.peek(1)\n\n if (look_10_0.between?(?0, ?9)) \n alt_10 = 1\n\n end\n case alt_10\n when 1\n # at line 25:21: '0' .. '9'\n match_range(?0, ?9)\n\n else\n break #loop 10\n end\n end\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 5)\n\n end", "def get_first_digit n \n n / 10**length(n) \nend", "def numeric_literal\n hex_integer_literal || octal_integer_literal || decimal_literal\n end", "def number(digits: T.unsafe(nil)); end", "def decode_digit(cp)\n cp - 48 < 10 ? cp - 22 : cp - 65 < 26 ? cp - 65 : cp - 97 < 26 ? cp - 97 : BASE\n end", "def decode_digit(cp)\n cp - 48 < 10 ? cp - 22 : cp - 65 < 26 ? cp - 65 : cp - 97 < 26 ? cp - 97 : BASE\n end", "def octal_val(decimal)\n octal = 0\n i = 0\n num = decimal\n\n loop do\n break if num <= 0\n octal += (num.divmod(8)[1] * (10 ** i))\n num = num.divmod(8)[0]\n i += 1\n end\n\n octal\nend", "def add_zero_to_1_to_9(str)\n str.sub(/(?<=\\D)\\d\\D?$/, '0\\0' )\n end", "def super_digit(n)\n n < 10 ? n : super_digit(n / 10 + n % 10)\nend", "def decimal\n normalized.hex\n end", "def digits\n @digits ||= numbers.insert(3, digit(numbers).to_s)\n end", "def to_2digit(number)\n if number < 9\n number=\"0\"+number.to_s\n else\n number=number.to_s\n end\n return number\nend", "def check_digit(input)\n input = letter_sub(input)\n input = to_int_array(input)\n input = multiply_by_2(input)\n input = sums_units_and_tens(input)\n input = sums_all_elements(input)\n input = substracts_from_next_ten(input)\n input\n end", "def digit(digit_number)\n digits[digit_number - 1].to_i\n end", "def checkdigit(number)\n\t\t\tdigits = number.to_s.reverse.scan(/\\d/).map { |x| x.to_i }\n\t\t\tdigits = digits.each_with_index.map { |d, i|\n\t\t\t d *= 2 if i.even?\n\t\t\t d > 9 ? d - 9 : d\n\t\t\t}\n\t\t\tsum = digits.inject(0) { |m, x| m + x }\n\t\t\tmod = 10 - sum % 10\n\t\t\tmod==10 ? 
0 : mod\n \tend", "def to_digits(d)\n digits = []\n while d > 0\n digits << d % 10\n d /= 10\n end\n digits\nend", "def leading_zero_number(digits: T.unsafe(nil)); end", "def base_converter(num, b)\n return \"\" if num == 0\n digits = %w(0 1 2 3 4 5 6 7 8 9 a b c d e f)\n\n base_converter(num/b, b) + digits[num%b]\n end", "def to_numeric(anything)\n\n num = BigDecimal.new(anything.to_s)\n if num.frac == 0\n num.to_i\n else\n num.to_r\n end\n end", "def singles_digits(digit)\n\t\n\tcase digit \n\t\t\n\t\twhen 1, 2, 6\n\t\t\treturn 3\n\t\twhen 3, 7, 8\n\t\t\treturn 5\n\t\twhen 4, 5, 9\n\t\t\treturn 4\n\t\telse\n\t\t\treturn 0\n\tend\nend", "def check_digit(number: 0)\n sum = 0\n number.to_s.chars.each_with_index do |digit, idx|\n position = idx + 1\n sum += (digit.to_i * (11 - position))\n end\n result = 11 - (sum % 11)\n\n # A value of 11 is considered the same as 0\n # See https://en.wikipedia.org/wiki/NHS_number\n return 0 if result == 11\n\n result\n end", "def digits(number)\n number.to_s.chars.map(&:to_i)\nend", "def to_decimal\n @value.digits.map.with_index do |current, index|\n current * (3 ** index)\n end.sum\n end", "def decimals(a)\n num = 0\n while(a != a.to_i)\n num += 1\n a *= 10\n end\n num\n end", "def digit \n\t\n\t$cst.add_branch(\"digit\")\n\t\n\tmatch_token(\"T_DIGIT\", $tokens[$index])\n\t\n\t$cst.ascend\n\t\nend", "def super_digit(n)\n return n if n < 10 \n return super_digit(n.to_s.chars.map(&:to_i).sum)\nend", "def super_digit(n)\n #has to be a single digit \n if n < 10 \n return n\n else \n return super_digit(n.digits.sum)\n end \n \n \nend", "def base_converter(num, b)\n return num.to_s if [0,1].include?(num)\n\n digits = %w(0 1 2 3 4 5 6 7 8 9 a b c d e f)\n base_converter(num/b, b) + digits[num % b]\nend", "def to_digit(keep=nil)\n result = self\n until result < 10 or result == keep\n result = result.digitize.inject(:+)\n end\n result\n end", "def normalize(digits_str)\n\t\t\tdigits_str.to_i\n\t\tend", "def expected_check_digit\n digits.last\n end", "def super_digit(n)\n return n if n / 10 == 0\n return super_digit(digit_helper(n))\nend", "def digits\n regex(/\\A\\d+\\z/, 'string.digits', 'must contain only digits 0-9')\n end", "def number\n number = number.to_s\n if number.includes?('.')\n number = number.to_f\n else\n number = number.to_i\n end\nend", "def _digits\n\n _save = self.pos\n while true # choice\n _tmp = match_string(\"0\")\n break if _tmp\n self.pos = _save\n\n _save1 = self.pos\n while true # sequence\n _tmp = scan(/\\A(?-mix:[1-9])/)\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = scan(/\\A(?-mix:[0-9]*)/)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_digits unless _tmp\n return _tmp\n end", "def base_converter(num, b)\n\n return num.to_s if [0, 1].include?(num)\n\n digits = %w(0123456789abcdef)\n base_converter(num/b, b) + digits[num % b]\n\nend", "def numerical_representation\n @numerical_representation ||= digits.scan(/\\d{11}/).collect {|p| \"#{p} #{digit(p)}\" }\n end", "def represent(number, in_base:)\n digits = \"\"\n\n while number > 0\n digits = CHARS[number % in_base] + digits\n number = number / in_base\n end\n\n digits\n end", "def set_last_digits\n if number\n number.to_s.gsub!(/\\s/,'')\n self.digits ||= number.to_s.length <= 4 ? 
number : number.to_s.slice(-4..-1)\n end\n end", "def reduce_digit(_production, _range, _tokens, _children)\n char_shorthand('d')\n end", "def reverse_digits(int)\n \nend", "def is_dec(str)\n\tstr == \".\"\nend", "def to_i\n 9999\n end", "def numerify(number_string, leading_zero: T.unsafe(nil)); end", "def validate_number(input)\n digits = input.gsub /[^\\d]/, ''\n digits = \"1#{digits}\" unless digits =~ /^1/\n digits = \"+#{digits}\" unless digits =~ /^\\+/\n digits = nil unless digits.length == 12\n digits\n end", "def digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 53 )\n\n type = DIGIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 352:8: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 53 )\n\n end", "def super_digit(n)\n return n if n < 10\n super_digit(n.digits.sum)\nend", "def super_digit(n)\n return n if n < 10\n super_digit(n.digits.sum)\nend", "def super_digit(n)\n return n if n < 10\n return super_digit(n.digits.sum)\nend", "def leading_zero_number(digits: 10)\n \"0#{(2..digits).collect { digit }.join}\"\n end", "def unit_digit_string(digit)\n case digit\n when 0 then 'zero'\n when 1 then 'one'\n when 2 then 'two'\n when 3 then 'three'\n when 4 then 'four'\n when 5 then 'five'\n when 6 then 'six'\n when 7 then 'seven'\n when 8 then 'eight'\n when 9 then 'nine'\n end\nend", "def number(placeholder = nil)\n generate(placeholder, NUMBER_CHARACTERS)\n end", "def find_digit_amount(num)\n\treturn num.to_s.length()\nend", "def super_digit(n)\n if n >= 0 && n < 10 \n return n\n end\n digits_of_n = n.digits\n sum_digits = digits_of_n.sum\n\n return super_digit(sum_digits)\nend", "def number\n result = ''\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n\n if @current_char == '.'\n result << @current_char\n advance\n while @current_char and @current_char =~ /[[:digit:]]/\n result << @current_char\n advance\n end\n Token.new(:real_const, result.to_f)\n else\n Token.new(:integer_const, result.to_i)\n end\n end", "def digit!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 37 )\n\n type = DIGIT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 136:8: '0' .. '9'\n match_range( 0x30, 0x39 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 37 )\n\n end", "def super_digit(n)\n return n if n < 10\n\n return super_digit(n.digits.sum)\nend", "def super_digit(n)\n return n if n < 10\n\n sum_digits = 0\n until n == 0\n sum_digits += n % 10\n n /= 10\n end\n\n super_digit(sum_digits)\nend", "def persistence(n)\n n < 10 ? 
0 : 1 + persistence(n.digits.reduce(&:*))\nend", "def super_digit(n)\n return n if n < 10\n sum = n.to_s.chars.map(&:to_i).sum\n return super_digit(sum)\nend", "def to_number(value)\n return value.to_i unless value =~ /\\./\n return value.to_f if value =~ /\\./ \n end", "def non_zero_digit\n rand(1..9)\n end", "def double_digit_value(n)\n n *= 2\n # if a digit in the id sequence has to be doubled then\n # it may have 2 digits\n # if that is the case, we need to add the 2 digits separately\n if n >= 10\n # the first digit of n*2 is always one since 0 <= n <= 9\n # the second digit of n*2 is (n*2) % 10\n 1 + (n % 10)\n else\n n\n end\n end", "def binary(digits)\n string(BINARY_DIGITS, digits)\n end", "def initialize(digits)\n @digits = digits\n end", "def decimals; num_dec; end", "def super_digit(n)\n\n return n if n < 10\n super_digit(n.digits.sum)\n\nend", "def super_digit(n)\n while n >= 10\n n = n % 10 + super_digit(n / 10)\n end\n return n\nend", "def speak_as_digits(element)\n speak_as(element, /[0-9]/, 'digits', method(:operation_speak_as_digits))\n end", "def prod_digits(s)\n prod = 1\n s.each_char { |c| prod *= c.to_i }\n prod\nend", "def super_digit(n)\n return n if n.to_s.length == 1 \n\n return super_digit(digit_helper(n))\nend", "def digits(num)\n return num.to_s.split(\"\").map { |i| i.to_i }\nend", "def dig_number(number)\n arr_dig = number.to_s.chars\n count = arr_dig.size\n arr_dig.map {|dig| dig.to_i ** count} \nend", "def add_digits(number)\nend", "def digits(input)\n verify_int(input).to_s.split(//)\nend", "def test_and_pass_1919fdjksl3_return_false\r\n\t\tassert_equal(false, all_numeric_digits?(\"1919fdjksl3\"))\r\n\tend", "def calculate_check_digit\n sum = digits.first(12).each_with_index.sum do |digit, index|\n index.even? ? digit : (digit * 3)\n end\n remainder = sum % 10\n remainder.zero? ? remainder : (10 - remainder)\n end", "def digits(cc_num)\n cc_num.to_i.digits\nend" ]
[ "0.7206913", "0.69871384", "0.6909503", "0.6789734", "0.66710514", "0.66146016", "0.6581263", "0.65359426", "0.6485555", "0.64837825", "0.6482358", "0.64792246", "0.64187557", "0.6418712", "0.6403715", "0.64019895", "0.6362964", "0.6342442", "0.6269263", "0.62669444", "0.626579", "0.6251956", "0.6242524", "0.6231571", "0.62311697", "0.62224275", "0.6211303", "0.6211303", "0.6206206", "0.6192256", "0.618865", "0.61849254", "0.6182719", "0.61804867", "0.6125833", "0.61248744", "0.6074207", "0.6040896", "0.60356593", "0.6023177", "0.5992776", "0.5990207", "0.5985502", "0.5979305", "0.5966783", "0.59665316", "0.5964018", "0.5957295", "0.59293234", "0.5912746", "0.5912068", "0.59108984", "0.5899926", "0.5899726", "0.5892304", "0.58882654", "0.5884541", "0.588208", "0.5875515", "0.586696", "0.58635646", "0.58631897", "0.58597916", "0.58533794", "0.58511025", "0.5850741", "0.5845386", "0.58451504", "0.5844946", "0.5844946", "0.58287764", "0.5823116", "0.5821207", "0.58202183", "0.58189654", "0.5806697", "0.58047473", "0.5804022", "0.58001626", "0.579152", "0.5786824", "0.5764813", "0.5763829", "0.57626975", "0.5761911", "0.5760895", "0.575785", "0.57525945", "0.57486236", "0.5745733", "0.5743887", "0.5743158", "0.5740444", "0.5736838", "0.57343835", "0.5733241", "0.57311827", "0.5730925", "0.57291424", "0.57230633" ]
0.6937585
2
Tests next literal is StringLiteral or not. If literal is StringLiteral return ECMA262::ECMA262String object and forward lexical parser position. Otherwise return nil and position is not changed.
def string_literal # StringLiteral :: # " DoubleStringCharactersopt " # ' SingleStringCharactersopt ' # # DoubleStringCharacters :: # DoubleStringCharacter DoubleStringCharactersopt # # SingleStringCharacters :: # SingleStringCharacter SingleStringCharactersopt # # DoubleStringCharacter :: # SourceCharacter but not one of " or \ or LineTerminator # \ EscapeSequence # LineContinuation # # SingleStringCharacter :: # SourceCharacter but not one of ' or \ or LineTerminator # \ EscapeSequence # LineContinuation # if (code = @codes[@pos]) == 0x27 #' term = 0x27 elsif code == 0x22 #" term = 0x22 else return nil end @pos += 1 pos0 = @pos str = [] while (code = @codes[@pos]) if code.nil? raise ParseError.new("no `#{term}' at end of string", self) elsif line_terminator?(code) raise ParseError.new("string has line terminator in body", self) elsif code == 0x5c #\ @pos += 1 str.push(escape_sequence) elsif code == term @pos += 1 return ECMA262::ECMA262String.new(str.compact.pack("U*")) else @pos += 1 str.push(code) end end nil end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 42)\n\n type = STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 486:4: '\\\\'' LITERAL_CHAR ( LITERAL_CHAR )* '\\\\''\n match(?\\')\n literal_char!\n # at line 486:22: ( LITERAL_CHAR )*\n loop do #loop 5\n alt_5 = 2\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, 0xFFFF)) \n alt_5 = 1\n\n end\n case alt_5\n when 1\n # at line 486:22: LITERAL_CHAR\n literal_char!\n\n else\n break #loop 5\n end\n end\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 42)\n\n end", "def read_string\n\t\t\n\t\t# TODO: Add the ability to have escape characters.\n\t\tstart_char \t= @chars.next # Don't add quotation marks to the string value\n\t\tline_num \t= @chars.get_cur_line\n\t\tcol_num \t= @chars.get_cur_col\n\t\ttok_val \t= ''\n\n\t\twhile true\n\t\t\tbegin\n\t\t\t\tchar = @chars.peak\n\t\t\trescue EOFError\n\t\t\t\traise \"LEXER ERROR: At line: #{line_num}, col: #{col_num} >> String does not end.\"\n\t\t \t\treturn nil\n\t\t \tend\n\n\t\t \tif char == start_char\n\t\t \t\treturn Token.new(\"String\", tok_val, line_num, col_num)\n\t\t \tend\n\t\t \ttok_val += char\n\t\t \[email protected]\n\t\tend\n\tend", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def read_string(token)\n opening_char = @marker.character\n token.kind = case opening_char\n when ?' then :single_string_lit\n when ?\" then :double_string_lit\n end\n\n escape = false\n chars = []\n\n while current = read_next()\n if escape\n current = case current\n when ?x, ?X\n # unicode hex escape\n peeked = peek_next()\n if !self.class::isxdigit(peeked)\n raise_error(:malformed_unicode_escape,\n \"Malformed unicode literal in string - no hex code provided.\")\n end\n\n hexnums = current == ?x ? 
4 : 8\n\n current = 0\n begin\n current = current << 4 | (case peeked\n when ?A, ?a then 0xA\n when ?B, ?b then 0xB\n when ?C, ?c then 0xC\n when ?D, ?d then 0xD\n when ?E, ?e then 0xE\n when ?F, ?f then 0xF\n when ?0 then 0x0\n when ?1 then 0x1\n when ?2 then 0x2\n when ?3 then 0x3\n when ?4 then 0x4\n when ?5 then 0x5\n when ?6 then 0x6\n when ?7 then 0x7\n when ?8 then 0x8\n when ?9 then 0x9\n end)\n read_next()\n peeked = peek_next()\n hexnums -= 1\n end while self.class::isxdigit(peeked) && hexnums > 0\n current.chr(Encoding::UTF_8)\n\n when ?r then ?\\r\n when ?n then ?\\n\n when ?t then ?\\t\n when ?0 then ?\\0\n when ?b then ?\\b\n when ?a then ?\\a\n when ?f then ?\\f\n when ?v then ?\\v\n else current\n end\n escape = false\n else\n if current == opening_char\n break\n elsif current == ?\\\\\n escape = true\n next\n end\n end\n\n chars << current\n end\n\n raise_error(:unterminated_string, \"Unterminated string\") if !current\n\n token.value = chars.join('')\n end", "def string?\n @kind == :double_string_lit || @kind == :single_string_lit\n end", "def get_target_string_literal_from_antlrstring_literal(generator, literal)\n literal = RJava.cast_to_string(Utils.replace(literal, \"\\\"\", \"\\\\\\\"\"))\n buf = StringBuffer.new(literal)\n buf.set_char_at(0, Character.new(?\".ord))\n buf.set_char_at(literal.length - 1, Character.new(?\".ord))\n buf.insert(0, Character.new([email protected]))\n return buf.to_s\n end", "def delimited_string_literal?(node); end", "def string_token\n quote = @chunk[0]\n string =\n case quote\n when ?' then @chunk.match(SIMPLESTR)[0]\n when ?\" then balanced_string(@chunk, '\"')\n end\n return unless string\n if string.index('#{')\n interpolate_string(string, str_offset: 1, lexed_length: string.size)\n else\n token(:STRING, escape(string), 0, string.size)\n end\n string.size\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def build_string_token\n scan_pos = scanner.pos\n line = @lineno\n column_start = scan_pos - @line_start\n\n literal = scanner.scan(/\"[^\"]*\"/)\n unless literal\n pos_start = \"line #{line}:#{column_start}\"\n raise ScanError, \"Error: [#{pos_start}]: Unterminated string.\"\n end\n\n pos = Rley::Lexical::Position.new(line, column_start)\n basic_string = TOMLString.new(literal[1..-2])\n lexeme = scanner.string[scan_pos..scanner.pos - 1]\n Rley::Lexical::Literal.new(basic_string, lexeme, 'STRING', pos)\n end", "def get_target_string_literal_from_antlrstring_literal(codegen, literal)\n buf = Grammar.get_unescaped_string_from_grammar_string_literal(literal)\n prefix = \"\\\"\"\n if (codegen.attr_grammar.get_max_char_value > 255)\n prefix = \"L\\\"\"\n end\n return prefix + RJava.cast_to_string(escape_string(buf.to_s)) + \"\\\"\"\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def string_token\n case @chunk[0]\n when \"'\"\n return nil unless md = SQUOTESTR.match(@chunk)\n string = 
md.to_a[0]\n token :String, string\n return string.length\n when '\"'\n return nil unless md = DQUOTESTR.match(@chunk)\n string = md.to_a[0]\n token :String, string\n return string.length\n else\n return nil\n end\n end", "def test_string_literal_empty\n check(C::StringLiteral, <<-EOS)\n |\"\"\n EOS\n end", "def string!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 14)\n\n type = STRING\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 142:3: ( '\\\"' (~ ( '\\\\\\\\' | '\\\"' ) | '\\\\\\\\' . )* '\\\"' | '\\\\'' (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . )* '\\\\'' )\n alt_6 = 2\n look_6_0 = @input.peek(1)\n\n if (look_6_0 == ?\") \n alt_6 = 1\n elsif (look_6_0 == ?\\') \n alt_6 = 2\n else\n nvae = NoViableAlternative(\"\", 6, 0)\n raise nvae\n end\n case alt_6\n when 1\n # at line 142:5: '\\\"' (~ ( '\\\\\\\\' | '\\\"' ) | '\\\\\\\\' . )* '\\\"'\n match(?\")\n # at line 142:10: (~ ( '\\\\\\\\' | '\\\"' ) | '\\\\\\\\' . )*\n while true # decision 4\n alt_4 = 3\n look_4_0 = @input.peek(1)\n\n if (look_4_0.between?(0x0000, ?!) || look_4_0.between?(?#, ?[) || look_4_0.between?(?], 0xFFFF)) \n alt_4 = 1\n elsif (look_4_0 == ?\\\\) \n alt_4 = 2\n\n end\n case alt_4\n when 1\n # at line 142:12: ~ ( '\\\\\\\\' | '\\\"' )\n if @input.peek(1).between?(0x0000, ?!) || @input.peek(1).between?(?#, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n when 2\n # at line 142:31: '\\\\\\\\' .\n match(?\\\\)\n match_any\n\n else\n break # out of loop for decision 4\n end\n end # loop for decision 4\n match(?\")\n\n when 2\n # at line 143:5: '\\\\'' (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . )* '\\\\''\n match(?\\')\n # at line 143:10: (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . 
)*\n while true # decision 5\n alt_5 = 3\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, ?[) || look_5_0.between?(?], 0xFFFF)) \n alt_5 = 1\n elsif (look_5_0 == ?\\\\) \n alt_5 = 2\n\n end\n case alt_5\n when 1\n # at line 143:12: ~ ( '\\\\\\\\' | '\\\\'' )\n if @input.peek(1).between?(0x0000, ?&) || @input.peek(1).between?(?(, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n when 2\n # at line 143:31: '\\\\\\\\' .\n match(?\\\\)\n match_any\n\n else\n break # out of loop for decision 5\n end\n end # loop for decision 5\n match(?\\')\n\n end\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 14)\n\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? 
statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, 
tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = @adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def consume_string(ending = T.unsafe(nil)); end", "def test_char_literal_simple\n check(C::CharLiteral, <<-EOS)\n |'x'\n EOS\n end", "def string!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 1 )\n\n type = STRING\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 15:3: ( '\\\"' (~ ( '\\\"' | '\\\\\\\\' ) | '\\\\\\\\' . )* '\\\"' | '\\\\'' (~ ( '\\\\'' | '\\\\\\\\' ) | '\\\\\\\\' . )* '\\\\'' )\n alt_3 = 2\n look_3_0 = @input.peek( 1 )\n\n if ( look_3_0 == 0x22 )\n alt_3 = 1\n elsif ( look_3_0 == 0x27 )\n alt_3 = 2\n else\n raise NoViableAlternative( \"\", 3, 0 )\n end\n case alt_3\n when 1\n # at line 15:5: '\\\"' (~ ( '\\\"' | '\\\\\\\\' ) | '\\\\\\\\' . 
)* '\\\"'\n match( 0x22 )\n # at line 15:10: (~ ( '\\\"' | '\\\\\\\\' ) | '\\\\\\\\' . )*\n while true # decision 1\n alt_1 = 3\n look_1_0 = @input.peek( 1 )\n\n if ( look_1_0.between?( 0x0, 0x21 ) || look_1_0.between?( 0x23, 0x5b ) || look_1_0.between?( 0x5d, 0xffff ) )\n alt_1 = 1\n elsif ( look_1_0 == 0x5c )\n alt_1 = 2\n\n end\n case alt_1\n when 1\n # at line 15:12: ~ ( '\\\"' | '\\\\\\\\' )\n if @input.peek( 1 ).between?( 0x0, 0x21 ) || @input.peek( 1 ).between?( 0x23, 0x5b ) || @input.peek( 1 ).between?( 0x5d, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n when 2\n # at line 15:29: '\\\\\\\\' .\n match( 0x5c )\n match_any\n\n else\n break # out of loop for decision 1\n end\n end # loop for decision 1\n match( 0x22 )\n\n when 2\n # at line 16:5: '\\\\'' (~ ( '\\\\'' | '\\\\\\\\' ) | '\\\\\\\\' . )* '\\\\''\n match( 0x27 )\n # at line 16:10: (~ ( '\\\\'' | '\\\\\\\\' ) | '\\\\\\\\' . )*\n while true # decision 2\n alt_2 = 3\n look_2_0 = @input.peek( 1 )\n\n if ( look_2_0.between?( 0x0, 0x26 ) || look_2_0.between?( 0x28, 0x5b ) || look_2_0.between?( 0x5d, 0xffff ) )\n alt_2 = 1\n elsif ( look_2_0 == 0x5c )\n alt_2 = 2\n\n end\n case alt_2\n when 1\n # at line 16:12: ~ ( '\\\\'' | '\\\\\\\\' )\n if @input.peek( 1 ).between?( 0x0, 0x26 ) || @input.peek( 1 ).between?( 0x28, 0x5b ) || @input.peek( 1 ).between?( 0x5d, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n when 2\n # at line 16:29: '\\\\\\\\' .\n match( 0x5c )\n match_any\n\n else\n break # out of loop for decision 2\n end\n end # loop for decision 2\n match( 0x27 )\n\n end\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 1 )\n\n end", "def string!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 98 )\n\n type = STRING\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 897:3: ( '\\\\'' (~ ( '\\\\'' | '\\\\\\\\' ) | '\\\\\\\\' . )* '\\\\'' | '\\\"' (~ ( '\\\"' | '\\\\\\\\' | '#' ) | '\\\\\\\\' . | {...}? => INTERPOLATION | '#' )* '\\\"' )\n alt_16 = 2\n look_16_0 = @input.peek( 1 )\n\n if ( look_16_0 == 0x27 )\n alt_16 = 1\n elsif ( look_16_0 == 0x22 )\n alt_16 = 2\n else\n raise NoViableAlternative( \"\", 16, 0 )\n end\n case alt_16\n when 1\n # at line 897:5: '\\\\'' (~ ( '\\\\'' | '\\\\\\\\' ) | '\\\\\\\\' . )* '\\\\''\n match( 0x27 )\n # at line 897:10: (~ ( '\\\\'' | '\\\\\\\\' ) | '\\\\\\\\' . )*\n while true # decision 14\n alt_14 = 3\n look_14_0 = @input.peek( 1 )\n\n if ( look_14_0.between?( 0x0, 0x26 ) || look_14_0.between?( 0x28, 0x5b ) || look_14_0.between?( 0x5d, 0xffff ) )\n alt_14 = 1\n elsif ( look_14_0 == 0x5c )\n alt_14 = 2\n\n end\n case alt_14\n when 1\n # at line 897:12: ~ ( '\\\\'' | '\\\\\\\\' )\n if @input.peek( 1 ).between?( 0x0, 0x26 ) || @input.peek( 1 ).between?( 0x28, 0x5b ) || @input.peek( 1 ).between?( 0x5d, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n when 2\n # at line 897:31: '\\\\\\\\' .\n match( 0x5c )\n match_any\n\n else\n break # out of loop for decision 14\n end\n end # loop for decision 14\n match( 0x27 )\n\n when 2\n # at line 898:5: '\\\"' (~ ( '\\\"' | '\\\\\\\\' | '#' ) | '\\\\\\\\' . | {...}? => INTERPOLATION | '#' )* '\\\"'\n match( 0x22 )\n # at line 899:5: (~ ( '\\\"' | '\\\\\\\\' | '#' ) | '\\\\\\\\' . | {...}? 
=> INTERPOLATION | '#' )*\n while true # decision 15\n alt_15 = 5\n alt_15 = @dfa15.predict( @input )\n case alt_15\n when 1\n # at line 899:7: ~ ( '\\\"' | '\\\\\\\\' | '#' )\n if @input.peek( 1 ).between?( 0x0, 0x21 ) || @input.peek( 1 ).between?( 0x24, 0x5b ) || @input.peek( 1 ).between?( 0x5d, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n when 2\n # at line 900:7: '\\\\\\\\' .\n match( 0x5c )\n match_any\n\n when 3\n # at line 901:7: {...}? => INTERPOLATION\n raise FailedPredicate( \"STRING\", \" at_interpolation? \" ) unless ( ( at_interpolation? ) )\n interpolation!\n # --> action\n type = DSTRING \n # <-- action\n\n when 4\n # at line 902:7: '#'\n match( 0x23 )\n\n else\n break # out of loop for decision 15\n end\n end # loop for decision 15\n match( 0x22 )\n\n end\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 98 )\n\n end", "def string!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 29 )\n\n\n\n type = STRING\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 195:5: '\\\"' ( '\\\\\\\\' '\\\"' |~ ( '\\\"' | '\\\\r' | '\\\\n' ) )* '\\\"'\n match( 0x22 )\n # at line 196:5: ( '\\\\\\\\' '\\\"' |~ ( '\\\"' | '\\\\r' | '\\\\n' ) )*\n while true # decision 3\n alt_3 = 3\n look_3_0 = @input.peek( 1 )\n\n if ( look_3_0 == 0x5c )\n look_3_2 = @input.peek( 2 )\n\n if ( look_3_2 == 0x22 )\n look_3_4 = @input.peek( 3 )\n\n if ( look_3_4.between?( 0x0, 0x9 ) || look_3_4.between?( 0xb, 0xc ) || look_3_4.between?( 0xe, 0xffff ) )\n alt_3 = 1\n\n else\n alt_3 = 2\n\n end\n elsif ( look_3_2.between?( 0x0, 0x9 ) || look_3_2.between?( 0xb, 0xc ) || look_3_2.between?( 0xe, 0x21 ) || look_3_2.between?( 0x23, 0xffff ) )\n alt_3 = 2\n\n end\n elsif ( look_3_0.between?( 0x0, 0x9 ) || look_3_0.between?( 0xb, 0xc ) || look_3_0.between?( 0xe, 0x21 ) || look_3_0.between?( 0x23, 0x5b ) || look_3_0.between?( 0x5d, 0xffff ) )\n alt_3 = 2\n\n end\n case alt_3\n when 1\n # at line 196:7: '\\\\\\\\' '\\\"'\n match( 0x5c )\n match( 0x22 )\n\n when 2\n # at line 197:7: ~ ( '\\\"' | '\\\\r' | '\\\\n' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0x21 ) || @input.peek( 1 ).between?( 0x23, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 3\n end\n end # loop for decision 3\n\n match( 0x22 )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 29 )\n\n\n end", "def type_string!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 36 )\n\n\n\n type = TYPE_STRING\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 212:14: 'string'\n match( \"string\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 36 )\n\n\n end", "def _string\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_dbl_string)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_sgl_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_string unless _tmp\n return 
_tmp\n end", "def called_with_string_literal?\n @string_allocation_count = 0\n method_array.each do |m|\n positions.each {|position| @string_allocation_count += 1 if m.arg_types[position] == :string_literal }\n @string_allocation_count += 1 if m.receiver == :string_literal\n end\n !@string_allocation_count.zero?\n end", "def double_quote_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 44)\n\n type = DOUBLE_QUOTE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 496:4: '\\\"' ( ESC | ~ ( '\\\\\\\\' | '\\\"' ) )* '\\\"'\n match(?\")\n # at line 496:8: ( ESC | ~ ( '\\\\\\\\' | '\\\"' ) )*\n loop do #loop 7\n alt_7 = 3\n look_7_0 = @input.peek(1)\n\n if (look_7_0 == ?\\\\) \n alt_7 = 1\n elsif (look_7_0.between?(0x0000, ?!) || look_7_0.between?(?#, ?[) || look_7_0.between?(?], 0xFFFF)) \n alt_7 = 2\n\n end\n case alt_7\n when 1\n # at line 496:9: ESC\n esc!\n\n when 2\n # at line 496:15: ~ ( '\\\\\\\\' | '\\\"' )\n if @input.peek(1).between?(0x0000, ?!) || @input.peek(1).between?(?#, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break #loop 7\n end\n end\n match(?\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 44)\n\n end", "def string\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 31 )\n return_value = StringReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n begin\n # at line 153:12: ( '\\\"' ( ESC | ~ ( '\\\\\\\\' | '\\\"' | '\\\\'' ) | '\\\\'' )* '\\\"' )\n # at line 153:12: ( '\\\"' ( ESC | ~ ( '\\\\\\\\' | '\\\"' | '\\\\'' ) | '\\\\'' )* '\\\"' )\n # at line 153:14: '\\\"' ( ESC | ~ ( '\\\\\\\\' | '\\\"' | '\\\\'' ) | '\\\\'' )* '\\\"'\n match(T__14, TOKENS_FOLLOWING_T__14_IN_string_1163)\n # at line 153:18: ( ESC | ~ ( '\\\\\\\\' | '\\\"' | '\\\\'' ) | '\\\\'' )*\n while true # decision 39\n alt_39 = 2\n look_39_0 = @input.peek(1)\n\n if (look_39_0.between?(WS, T__13) || look_39_0.between?(T__15, T__30) || look_39_0.between?(T__32, T__43))\n alt_39 = 1\n\n end\n case alt_39\n when 1\n # at line\n if @input.peek(1).between?(WS, T__13) || @input.peek(1).between?(T__15, T__30) || @input.peek(1).between?(T__32, T__43)\n @input.consume\n @state.error_recovery = false\n else\n mse = MismatchedSet(nil)\n raise mse\n end\n\n\n else\n break # out of loop for decision 39\n end\n end # loop for decision 39\n match(T__14, TOKENS_FOLLOWING_T__14_IN_string_1193)\n\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look(-1)\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 31 )\n\n end\n\n return return_value\n end", "def consume_string(ending = nil)\n ending = @s.current if ending.nil?\n value = String.new\n\n until @s.eos?\n case char = @s.consume\n when ending\n break\n\n when \"\\n\"\n # Parse error.\n @s.reconsume\n return create_token(:bad_string,\n :error => true,\n :value => value)\n\n when '\\\\'\n case @s.peek\n when ''\n # End of the input, so do nothing.\n next\n\n when \"\\n\"\n @s.consume\n\n else\n value << consume_escaped\n end\n\n else\n value << char\n end\n end\n\n create_token(:string, :value => value)\n end", "def 
frozen_string_literal_specified?; end", "def is_literal?\n if @operator && @operator.is_negation?\n @right_sentence.is_literal?\n else\n @left_sentence.nil? && @right_sentence.nil?\n end\n end", "def action_char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 53)\n\n \n # - - - - main rule block - - - -\n # at line 567:4: '\\\\'' (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . )* '\\\\''\n match(?\\')\n # at line 567:9: (~ ( '\\\\\\\\' | '\\\\'' ) | '\\\\\\\\' . )*\n loop do #loop 14\n alt_14 = 3\n look_14_0 = @input.peek(1)\n\n if (look_14_0.between?(0x0000, ?&) || look_14_0.between?(?(, ?[) || look_14_0.between?(?], 0xFFFF)) \n alt_14 = 1\n elsif (look_14_0 == ?\\\\) \n alt_14 = 2\n\n end\n case alt_14\n when 1\n # at line 567:11: ~ ( '\\\\\\\\' | '\\\\'' )\n if @input.peek(1).between?(0x0000, ?&) || @input.peek(1).between?(?(, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n when 2\n # at line 567:26: '\\\\\\\\' .\n match(?\\\\)\n match_any\n\n else\n break #loop 14\n end\n end\n match(?\\')\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 53)\n\n end", "def action_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 54)\n\n \n # - - - - main rule block - - - -\n # at line 572:4: '\\\"' (~ ( '\\\\\\\\' | '\\\"' ) | '\\\\\\\\' . )* '\\\"'\n match(?\")\n # at line 572:8: (~ ( '\\\\\\\\' | '\\\"' ) | '\\\\\\\\' . )*\n loop do #loop 15\n alt_15 = 3\n look_15_0 = @input.peek(1)\n\n if (look_15_0.between?(0x0000, ?!) || look_15_0.between?(?#, ?[) || look_15_0.between?(?], 0xFFFF)) \n alt_15 = 1\n elsif (look_15_0 == ?\\\\) \n alt_15 = 2\n\n end\n case alt_15\n when 1\n # at line 572:10: ~ ( '\\\\\\\\' | '\\\"' )\n if @input.peek(1).between?(0x0000, ?!) 
|| @input.peek(1).between?(?#, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n when 2\n # at line 572:24: '\\\\\\\\' .\n match(?\\\\)\n match_any\n\n else\n break #loop 15\n end\n end\n match(?\")\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 54)\n\n end", "def string!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 5 )\n\n type = STRING\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 280:8: '\\\"' ( ESC_SEQ | ' ' | ~ ( '\\\\\\\\' | '\\\"' | ' ' ) )* '\\\"'\n match( 0x22 )\n # at line 280:12: ( ESC_SEQ | ' ' | ~ ( '\\\\\\\\' | '\\\"' | ' ' ) )*\n while true # decision 1\n alt_1 = 4\n look_1_0 = @input.peek( 1 )\n\n if ( look_1_0 == 0x5c )\n alt_1 = 1\n elsif ( look_1_0 == 0x20 )\n alt_1 = 2\n elsif ( look_1_0.between?( 0x0, 0x1f ) || look_1_0 == 0x21 || look_1_0.between?( 0x23, 0x5b ) || look_1_0.between?( 0x5d, 0xffff ) )\n alt_1 = 3\n\n end\n case alt_1\n when 1\n # at line 280:14: ESC_SEQ\n esc_seq!\n\n when 2\n # at line 280:24: ' '\n match( 0x20 )\n\n when 3\n # at line 280:30: ~ ( '\\\\\\\\' | '\\\"' | ' ' )\n if @input.peek( 1 ).between?( 0x0, 0x1f ) || @input.peek(1) == 0x21 || @input.peek( 1 ).between?( 0x23, 0x5b ) || @input.peek( 1 ).between?( 0x5d, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 1\n end\n end # loop for decision 1\n match( 0x22 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 5 )\n\n end", "def parse_literal(str)\n Kernel.eval(str)\n rescue Exception => ex\n raise TypeSystem::InvalidValueLiteralError, \"Invalid ruby value literal #{str.inspect}\", ex.backtrace\n end", "def parse_str(io, stop_quote)\n buf = ''\n loop do\n c = io.read_one_char\n if c.nil?\n raise Error, \"The IO ran out before the end of a literal string\"\n elsif buf.length > 0 && buf[-1..-1] == ESC # If this char was escaped\n # Trim the escape character at the end of the buffer\n buf = buf[0..-2] \n buf << c\n elsif c == stop_quote\n return buf\n else\n buf << c\n end\n end\n end", "def literal?(node); end", "def literal_char!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 43)\n\n \n # - - - - main rule block - - - -\n # at line 491:2: ( ESC | ~ ( '\\\\'' | '\\\\\\\\' ) )\n alt_6 = 2\n look_6_0 = @input.peek(1)\n\n if (look_6_0 == ?\\\\) \n alt_6 = 1\n elsif (look_6_0.between?(0x0000, ?&) || look_6_0.between?(?(, ?[) || look_6_0.between?(?], 0xFFFF)) \n alt_6 = 2\n else\n nvae = NoViableAlternative(\"\", 6, 0)\n raise nvae\n end\n case alt_6\n when 1\n # at line 491:4: ESC\n esc!\n\n when 2\n # at line 492:4: ~ ( '\\\\'' | '\\\\\\\\' )\n if @input.peek(1).between?(0x0000, ?&) || @input.peek(1).between?(?(, ?[) || @input.peek(1).between?(?], 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 43)\n\n end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . 
)* '>>'\n match(\"<<\")\n # at line 500:9: ( . )*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def literal(buffer)\n reader = lambda { |string = ''|\n buffer.major_mode.read(1) do |event|\n if unicode = event.unicode\n string += unicode # copy\n buffer.message string.inspect\n\n case result = literal_handle(buffer, string)\n when nil\n reader.call(string)\n when String\n literal_insert(buffer, result)\n end\n else\n return # Unverrichteter Dinge\n end\n end\n }\n\n reader.call\n end", "def nil_literal?(name)\n node.public_send(name).kind_of?(Rubinius::AST::NilLiteral)\n end", "def str_value\n return @str_value unless @str_value.nil?\n if is_string != 0\n io = _parent.strings._io\n _pos = io.pos\n io.seek(value_or_ofs_value)\n @str_value = (io.read_bytes_term(0, false, true, true)).force_encoding(\"UTF-8\")\n io.seek(_pos)\n end\n @str_value\n end", "def consume_string(ending)\n value = ''\n\n until @s.eos?\n case char = @s.consume\n when ending\n break\n\n when \"\\n\"\n @s.reconsume\n return create_token(:bad_string,\n :error => true,\n :value => value)\n\n when '\\\\'\n case @s.peek\n when ''\n # End of the input, so do nothing.\n next\n\n when \"\\n\"\n @s.consume\n\n else\n value << consume_escaped\n end\n\n else\n value << char\n end\n end\n\n create_token(:string, :value => value)\n end", "def literal_handle(_buffer, string)\n case string\n when /^\\d{,3}$/\n return if string.size < 3\n [string.to_i].pack('U')\n when /^o([0-7]{,3})$/i\n return if Regexp.last_match(1).size < 3\n [Integer(\"0#{Regexp.last_match(1)}\")].pack('U')\n when /^x(\\h{,2})$/i\n return if Regexp.last_match(1).size < 2\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n when /^u(\\h{,4})$/\n return if Regexp.last_match(1).size < 4\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n when /^U(\\h{,8})$/\n return if Regexp.last_match(1).size < 8\n [Integer(\"0x#{Regexp.last_match(1)}\")].pack('U')\n end\n end", "def stringexpr \n\t\n\t$cst.add_branch(\"StringExpr\")\n\t\n\tmatch_token(\"T_QUOTE\", $tokens[$index])\n\tcharList\n\tmatch_token(\"T_QUOTE\", $tokens[$index])\n\n\t$cst.ascend\n\t\nend", "def isStringConstant(str)\n return (str[0].to_s == \"\\\"\" and str[str.to_s.size - 1].to_s == \"\\\"\") #if statement surrounded by quotation marks of any kinds\n\n return false\nend", "def parse_literal(expr)\n val = expr[1][1][1]\n val = val.to_sym if expr[0] == :symbol_literal ||\n expr[0] == :assoc_new\n val\n end", "def quoted_string\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 31 )\n text = nil\n string31 = nil\n\n begin\n # at line 189:4: QUOTE string QUOTE\n match( QUOTE, TOKENS_FOLLOWING_QUOTE_IN_quoted_string_1727 )\n @state.following.push( TOKENS_FOLLOWING_string_IN_quoted_string_1729 )\n string31 = string\n @state.following.pop\n match( QUOTE, TOKENS_FOLLOWING_QUOTE_IN_quoted_string_1731 )\n # --> action\n text = ( string31 && @input.to_s( string31.start, string31.stop ) )\n # <-- action\n\n rescue 
ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 31 )\n\n end\n \n return text\n end", "def literal?\n @value.is_a?(String) || @value.is_a?(Integer)\n end", "def start_identifier?(text = T.unsafe(nil)); end", "def string_at?(line, column)\n node = node_at(line, column)\n # @todo raise InvalidOffset or InvalidRange or something?\n return false if node.nil?\n node.type == :str or node.type == :dstr\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def frozen_string_literal?; end", "def correct_for_literals(lineno, column)\n tstring_index = @lex.index do |pos, token|\n pos[0] == lineno and pos[1] == column and\n token == :on_tstring_content\n end\n\n tstring_index ? @lex[tstring_index -1][0][1] : column\n end", "def _string\n\n begin # choice\n _tmp = apply(:_dbl_string)\n break if _tmp\n _tmp = apply(:_sgl_string)\n end while false # end choice\n\n set_failed_rule :_string unless _tmp\n return _tmp\n end", "def read_cstring\n nul_pos = @content.index(NUL, @position)\n raise Error, \"no cstring found!\" unless nul_pos\n\n sz = nul_pos - @position\n str = @content[@position, sz]\n @position += sz + 1\n return str\n end", "def test_string3\n token, value, rest = @c.lex('\" hello \\\\\\\\\" for')\n assert_equal(:string, token)\n assert_equal('\" hello \\\\\\\\\"', value)\n assert_equal(' for', rest)\n end", "def parse_constant\n case current\n when :LIT_STRING\n ExprString.new(expect(:LIT_STRING))\n when :KW_TRUE, :KW_FALSE then\n parse_bool_constant\n when :LIT_INT, :LIT_FLOAT then\n parse_numeric_constant\n else\n parse_complex_constant\n end\n end", "def lex_en_inside_string=(_arg0); end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def is_strtype?(); @type == GRT_STRTYPE; end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? 
&& new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def string_token\n if %w(' \").include?(@chunk[0])\n result = process_string @chunk[0..-1], @chunk[0]\n if @looking_for_args\n token :Argument, result.str, attrs: { quoted_by: @chunk[0] }\n else\n token :Command, result.str\n end\n result.consumed_length\n else\n did_not_match\n end\n end", "def test_string2\n token, value, rest = @c.lex(' \"say \\\\\"hello\\\\\"\" while')\n assert_equal(:string, token)\n assert_equal('\"say \\\\\"hello\\\\\"\"', value)\n assert_equal(' while', rest)\n end", "def process_lit(exp)\n # TODO: audit against obfuscator\n value = exp.shift\n case value\n when Integer then\n return \"LONG2NUM(#{value})\"\n when Float then\n return \"rb_float_new(#{value})\"\n when Symbol\n return \"ID2SYM(rb_intern(#{value.to_s.inspect}))\"\n when Range\n f = process_lit [ value.first ]\n l = process_lit [ value.last ]\n x = 0\n x = 1 if value.exclude_end?\n\n return \"rb_range_new(#{f}, #{l}, #{x})\"\n when Regexp\n src = value.source\n return \"rb_reg_new(#{src.inspect}, #{src.size}, #{value.options})\"\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n return nil\n end", "def string\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 60 )\n\n\n return_value = StringReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n\n root_0 = nil\n\n __COMILLA324__ = nil\n set325 = nil\n __COMILLA326__ = nil\n\n\n tree_for_COMILLA324 = nil\n tree_for_set325 = nil\n tree_for_COMILLA326 = nil\n\n begin\n root_0 = @adaptor.create_flat_list\n\n\n # at line 573:9: COMILLA ( EscapeSequence |~ ( '\\\\'' | '\\\\\\\\' ) )* COMILLA\n __COMILLA324__ = match( COMILLA, TOKENS_FOLLOWING_COMILLA_IN_string_2819 )\n if @state.backtracking == 0\n tree_for_COMILLA324 = @adaptor.create_with_payload( __COMILLA324__ )\n @adaptor.add_child( root_0, tree_for_COMILLA324 )\n\n end\n\n # at line 573:17: ( EscapeSequence |~ ( '\\\\'' | '\\\\\\\\' ) )*\n while true # decision 44\n alt_44 = 2\n look_44_0 = @input.peek( 1 )\n\n if ( look_44_0.between?( ASIGNACION, COMA ) || look_44_0.between?( CORDER, WS ) || look_44_0.between?( T__81, T__82 ) )\n alt_44 = 1\n\n end\n case alt_44\n when 1\n # at line \n set325 = @input.look\n\n if @input.peek( 1 ).between?( ASIGNACION, COMA ) || @input.peek( 1 ).between?( CORDER, WS ) || @input.peek( 1 ).between?( T__81, T__82 )\n @input.consume\n if @state.backtracking == 0\n @adaptor.add_child( root_0, @adaptor.create_with_payload( set325 ) )\n end\n\n @state.error_recovery = false\n\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n mse = MismatchedSet( nil )\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 44\n end\n end # loop for decision 44\n\n __COMILLA326__ = match( COMILLA, TOKENS_FOLLOWING_COMILLA_IN_string_2837 )\n if @state.backtracking == 0\n tree_for_COMILLA326 = @adaptor.create_with_payload( __COMILLA326__ )\n @adaptor.add_child( root_0, tree_for_COMILLA326 )\n\n end\n\n\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n\n if @state.backtracking == 0\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, 
return_value.start, return_value.stop )\n\n end\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 60 )\n\n\n end\n\n return return_value\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def test_string6\n input = '\" hello\\n\"'\n assert_equal(10, input.length)\n token, value, rest = @c.lex(input)\n assert_equal(:string, token)\n assert_equal(input, value)\n assert_equal('', rest)\n end", "def has_literal?(literal)\n return true if self.scan_for { @decoder.literal_scan(literal) }\n return false if self.c_method?\n\n exptree = ExpressionTree.of(method: @method, source: @source)\n exptree.includes?(literal)\n end", "def node_as_string(node)\n return nil unless node\n\n case node.type\n when :symbol, :symbol_literal\n node.source[1..]\n when :label\n node.source[0..-2]\n when :dyna_symbol\n content = node.jump(:tstring_content)\n content.nil? ? 
node.source : content.source\n when :string_literal\n content = node.jump(:tstring_content)\n return content.source if content != node\n\n # This attempts to work around a bug in YARD (https://github.com/lsegal/yard/issues/779)\n # Check to see if the string source appears to have a heredoc open tag (or \"most\" of one)\n # If so, remove the first line and the last line (if the latter contains the heredoc tag)\n source = node.source\n if source =~ HEREDOC_START\n lines = source.split(\"\\n\")\n source = lines[1..(lines.last.include?(Regexp.last_match(1)[0..-2]) ? -2 : -1)].join(\"\\n\") if lines.size > 1\n end\n\n source\n when :regexp_literal\n node.source\n end\n end", "def get_target_char_literal_from_antlrchar_literal(codegen, literal)\n c = Grammar.get_char_value_from_grammar_char_literal(literal)\n prefix = \"'\"\n if (codegen.attr_grammar.get_max_char_value > 255)\n prefix = \"L'\"\n else\n if (!((c & 0x80)).equal?(0))\n # if in char mode prevent sign extensions\n return \"\" + RJava.cast_to_string(c)\n end\n end\n return prefix + RJava.cast_to_string(escape_char(c)) + \"'\"\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def shift_docstring\n if expressions.first.kind_of?(Rubinius::AST::StringLiteral) &&\n expressions.size > 1\n expressions.shift\n end\n end", "def type_of literal\n case literal.sexp_type\n when :true, :false\n 'unsigned char'\n when :array\n \"#{ type_of(literal.sexp_body.first) }[#{ literal.sexp_body.count }]\"\n when :lit\n case literal.sexp_body.first.class.name\n when 'NilClass', 'Fixnum', 'Bignum'\n 'signed long'\n when 'Float'\n 'float'\n else\n raise \"Unsupported type #{literal.sexp_body.first.class.name} used: #{literal.sexp_body.first}\"\n end\n end\n end", "def property_is_literal_parse_type?(element, parent, &block)\n att = element.attribute_with_ns('parseType', RDF.to_uri.to_s)\n object = att.nil? ? nil : (att.value == 'Literal' ? true : nil)\n return nil if object.nil?\n block.call([parent, element.ns_name, element.to_s])\n true\n end", "def consume_string()\n delimiter = @partial_string['delimiter']\n escape_next = @partial_string['escape_next']\n built = @partial_string['built']\n success = false\n\n debug \"Attempting to consume string (with delim: #{delimiter})\"\n loop do\n first = @stream[0]\n break unless first\n\n debug \"Iter for char '#{first}', escaping this char? #{escape_next}, delimiter '#{delimiter}' - current: #{built}\",\n :verbose\n if escape_next\n built += ESCAPE_CHARS.include?(first) ? ESCAPE_CHARS[first] : first\n escape_next = false\n elsif first == '\\\\'\n escape_next = true\n elsif first == delimiter\n consume\n success = true\n\n break\n else\n built += first\n end\n\n consume\n end\n\n debug \"String consumption success?: #{success}\"\n if success\n reset_state\n create_token(:string, built)\n else\n @partial_string['escape_next'] = escape_next\n @partial_string['built'] = built\n end\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? 
|| token.kind_of?(String) \n @pointer += 1\n token\n end", "def test_single_line_string\r\n test_string = \"abc\"\r\n source = XfOOrth::StringSource.new(test_string)\r\n\r\n assert_equal(source.get, 'a')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'b')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, 'c')\r\n refute(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, ' ')\r\n assert(source.eoln?)\r\n refute(source.eof?)\r\n\r\n assert_equal(source.get, nil)\r\n assert(source.eoln?)\r\n assert(source.eof?)\r\n end", "def literal; end", "def lex_en_inside_string; end", "def to_string\n case node\n in SyntaxTree::StringLiteral[parts: [SyntaxTree::TStringContent[value:]]]\n value\n in SyntaxTree::StringLiteral[parts:]\n raise CompilationError, \"Unexpected string parts type: #{parts.inspect}\"\n else\n raise CompilationError, \"Unexpected node type: #{node.class.name}\"\n end\n end", "def match(ptr, depth = 0)\n case c = ptr.peek(1)\n when '\"', '`'\n start_pos = ptr.pos\n ptr.pos += 1\n AST.new(:string, value: ptr.scan_until(/#{c}/).chop,\n attributes: { type: char_to_type(c) },\n pos: start_pos)\n end\n end", "def is_strname?(); @type == GRT_STRNAME; end", "def literals_list\n @literals ||= \"\"\n end", "def lex_string(s)\n @files.stream_stash([FileIO.new(StringIO.new(s), \"-\")])\n r = do_read_token\n next?(\"\\n\")\n p = get_pos(0)\n if peek != nil # EOF\n raise \"#{p}: unconsumed input: #{s}\"\n # errorp(p, \"unconsumed input: %s\", s)\n end\n @files.stream_unstash\n r\n end", "def get_next_object\n tmp = ''\n while @base_str[@cursor] == ' '\n @cursor += 1 # skip whitespaces\n end\n\n string_detected = false\n case @base_str[cursor]\n when '\"'\n @cursor += 1\n string_detected = true\n\n when '{'\n return process_hash\n\n when '['\n return procecss_array\n\n end\n\n # check for empty value\n if string_detected && @base_str[@cursor] == '\"'\n @cursor += 1\n return ''\n end\n\n b_continue = true\n while b_continue\n char = @base_str[@cursor]\n if char == '\\\\'\n escaped_char = @base_str[@cursor + 1]\n case escaped_char\n when '\"'\n tmp << \"\\\"\"\n when 'a'\n tmp << \"\\a\"\n when 'b'\n tmp << \"\\b\"\n when 'r'\n tmp << \"\\r\"\n when 'n'\n tmp << \"\\n\"\n when 's'\n tmp << \"\\s\"\n when 't'\n tmp << \"\\t\"\n else # for single \\\n tmp << char\n @cursor -= 1 # compensate shifting below\n end\n # tmp << @base_str[@cursor + 1]\n @cursor += 2\n else\n tmp << char\n @cursor += 1\n end\n\n b_continue = if string_detected\n @base_str[@cursor] != '\"'\n else\n @base_str[@cursor] != ' ' &&\n @base_str[@cursor] != '}' &&\n @base_str[@cursor] != ']' &&\n @base_str[@cursor] != ','\n end\n end\n\n @cursor += 1 if string_detected # skip end quotes\n\n # puts \"found obj: '#{tmp}'\"\n unless string_detected\n tmp = tmp != 'null' ? eval(tmp) : nil\n end\n tmp\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def match(str = T.unsafe(nil)); end", "def match(str = T.unsafe(nil)); end", "def on_string(ast_node, context)\n return ast_node.children[0]\n end", "def valid_substring?(string)\n [email protected]_at(string)\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? 
/^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def string(str); end", "def supports_standard_conforming_strings?\n true\n end", "def white_space\n if white_space?(@codes[@pos])\n begin\n @pos += 1\n end until !white_space?(@codes[@pos])\n return ECMA262::WhiteSpace.get\n else\n nil\n end\n end", "def emit_literal\n ts, te = literal.first[1], literal.last[2]\n text = literal.map {|t| t[0]}.join\n\n text.force_encoding('utf-8') if text.respond_to?(:force_encoding)\n\n self.literal = nil\n emit(:literal, :literal, text, ts, te)\n end", "def emit_literal\n ts, te = literal.first[1], literal.last[2]\n text = literal.map {|t| t[0]}.join\n\n text.force_encoding('utf-8') if text.respond_to?(:force_encoding)\n\n self.literal = nil\n emit(:literal, :literal, text, ts, te)\n end", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def test_string7\n input = '\" hello\\n there'\n token, value, rest = @c.lex(input)\n assert_equal(:open, token)\n assert_equal('\"', value)\n assert_equal('\" hello\\n there', rest)\n end", "def add_literal(literal)\n index = @literals.size\n @literals << literal\n return index\n end", "def is_string?(); @type == GRT_STRING; end" ]
[ "0.69983554", "0.6350717", "0.61525947", "0.5958539", "0.59156597", "0.58566576", "0.5797107", "0.56965303", "0.5658084", "0.56444263", "0.5640862", "0.5638683", "0.55706257", "0.553737", "0.5528167", "0.5525175", "0.5500137", "0.5494996", "0.5481568", "0.5480121", "0.5457949", "0.54533094", "0.5442078", "0.54375803", "0.5380667", "0.53759366", "0.53515047", "0.53375286", "0.53245175", "0.5290467", "0.52868", "0.52742004", "0.5249884", "0.5247142", "0.52109575", "0.5210169", "0.5205322", "0.52049786", "0.5195937", "0.5183152", "0.51746136", "0.5145038", "0.5123466", "0.51207924", "0.510091", "0.509142", "0.5090982", "0.50754935", "0.50590694", "0.505687", "0.5048909", "0.5042191", "0.5038709", "0.5035649", "0.50265044", "0.50179625", "0.4999411", "0.4961027", "0.49578157", "0.4953854", "0.4944632", "0.49371317", "0.49357668", "0.49252814", "0.49237406", "0.49233294", "0.49147397", "0.49137068", "0.49099988", "0.49082282", "0.48918602", "0.48889518", "0.48856625", "0.48770246", "0.4876808", "0.48711854", "0.4864825", "0.48573664", "0.48543483", "0.4849854", "0.48340395", "0.48329794", "0.4829788", "0.48254678", "0.48135504", "0.48131987", "0.48037288", "0.48037288", "0.48025823", "0.4774445", "0.47717106", "0.4761678", "0.47604057", "0.47456914", "0.47428623", "0.47428623", "0.47384906", "0.4731809", "0.47299817", "0.47229847" ]
0.7373966
0
Returns true if position is at end of file
def eof?
  peek_lit(nil).nil?
end
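The check above relies on peeking rather than consuming: if there is no next literal to peek at, the input is exhausted. Below is a minimal, self-contained sketch of that pattern; ToyLexer and its plain token array are assumptions for illustration (the source's whitespace/line-terminator skipping is omitted), not the actual lexer.

class ToyLexer
  def initialize(tokens)
    @tokens = tokens   # hypothetical token stream
    @pos = 0
  end

  # Look at the next literal without consuming it; nil signals end of input.
  def peek_lit(_hint)
    @tokens[@pos]
  end

  # EOF exactly when there is nothing left to peek, as in the snippet above.
  def eof?
    peek_lit(nil).nil?
  end
end

ToyLexer.new(%w[a]).eof?  # => false
ToyLexer.new([]).eof?     # => true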
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def at_end?\n peek.type == :eof\n end", "def eof?\n @pos == @data.bytesize\n end", "def eof?\n @position >= @size\n end", "def eof?\r\n false\r\n end", "def eof?\n @eof\n end", "def eof?\n @eof\n end", "def eof?\n @eof\n end", "def eof?\n @eof\n end", "def eof?\n @eof\n end", "def eof?\n @eof\n end", "def eof?\n @read >= @size\n end", "def eof_found?\n @eof_found\n end", "def eof?\n io.eof?\n end", "def eof?\n @io.eof?\n end", "def eof?\n @io.eof?\n end", "def eof?\n @io.eof?\n end", "def eof?\n if @buffer.size > 0\n false\n else\n @io.eof?\n end\n end", "def eof?()\n #This is a stub, used for indexing\n end", "def eof?\n wait_for_chars(1)\n @cursor == @buffer.length && @eof_found\n end", "def eof?\n @stream.eof?\n end", "def eof?\n return @stream.eof?\n end", "def eof?() end", "def eof?() end", "def eof?() end", "def eof?\n ref_line.nil? && output_line.nil?\n end", "def eof?\n end", "def end_of_stream?\n @next_chunk.nil?\n end", "def eof?\n @io.closed? || @io.eof?\n end", "def track_eof?\n true if track_eof\n end", "def eof?\n raise IOError, 'not opened for reading' if closed?\n @total == @size && @buffer.empty?\n end", "def prim_eof?\n false\n end", "def eof?\n @input.eof?\n end", "def eof_flag\n @eof_flag\n end", "def end?\r\n\t\tif(@allLines.empty?)\r\n\t\t\treturn true\r\n\t\telse\r\n\t\t\treturn false\r\n\t\tend\r\n\tend", "def eof; @file.eof; end", "def eof?; @io.size == @rio end", "def eof?\n ready_token\n if @buffer\n @buffer.empty? && @io.eof?\n else\n @io.eof?\n end\n end", "def eos?\n @pos >= @string.size\n end", "def eof?\n !@io || (@io.closed? || @io.eof?) && @buffer.empty?\n end", "def eof?\n !@io || (@io.closed? || @io.eof?) && @buffer.empty?\n end", "def eof?\n not busy?\n end", "def eof?\n stream = @_st_stream\n stream and stream.eof?\n end", "def eof?\n fill_rbuff if !@eof && @rbuffer.empty?\n @eof && @rbuffer.empty?\n end", "def end?()\n END_MARKER.equal? _next_element\n end", "def end?\n @cursor == @text.length\n end", "def end?\n @cursor == @text.length\n end", "def dot_is_end?\n @rule.right_tokens.count == @pos_index\n end", "def eof?\n\t\t\t\tfill_read_buffer if !@eof && @read_buffer.empty?\n\t\t\t\t\n\t\t\t\treturn @eof && @read_buffer.empty?\n\t\t\tend", "def end_of_file(arg)\n @offenses << error('At the end of file: Final newline missing') unless arg.readlines.last.match(/\\n/)\n end", "def readable_after_eof?\n false\n end", "def readable_after_eof?\n false\n end", "def eof\n if @sio_closed_read ; __require_readable ; end\n @sio_pos >= @sio_string.length\n end", "def eof?\n code == FX_EOF\n end", "def eof?\n @stdin.eof?\n end", "def eof?\n @stdin.eof?\n end", "def eof?\n @stdin.eof?\n end", "def eof?\n @state.eof_reached? && get_player_output.size < 1\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def readable_after_eof?\n true\n end", "def is_partition_eof?\n code == :partition_eof\n end", "def eof?\n @stmt.done?\n end", "def frozen?(file)\n file.words.at(file.content_offset + offset + size - 1) == end_marker\n end", "def eof?\n\t\tnot peak\n\tend", "def end?\n @status == :end\n end", "def buffer_empty?\n @line_no.zero? 
|| @pos > @line.length - 1\n end", "def skip_to_eoln\r\n @source.get until @source.eoln?\r\n true\r\n end", "def eof?\n $stdin.closed?\n end", "def end_element?\n @contents[0] == :end_element\n end", "def eof?\n if @stdin.wait_readable(0.00001)\n c = @stdin.getc\n result = c.nil? ? true : false\n @stdin.ungetc(c) unless c.nil?\n result\n else # buffer is empty\n false\n end\n end", "def eof\n @pushback.nil? and (@input.nil? or @input.eof)\n end", "def eof!; end", "def eof?; end", "def eof?; end", "def ends_with?(s)\n rindex(s) == size - s.size\n end", "def ends_with?(s)\n rindex(s) == size - s.size\n end", "def check_eof( row )\n @eof = !row.has_key?( :row )\n end", "def fresh_line?\n @content.empty? || @content[-1].eql?(NL)\n end", "def isAtEnd\n return @index >= (@actions.size - 1)\n end", "def end_of_word?\n @end_of_word\n end", "def element_end?\n @reader.node_type == XML::Reader::TYPE_END_ELEMENT\n end", "def eof?\n @sock.closed?\n end", "def new_file?\n\t\t@filelineno == 1\n\tend", "def end_pos; end", "def end_pos; end", "def end_of_meta_data\n cur = tell\n\n seek 0\n line = _gets\n return unless META_SEP =~ line\n\n while line = _gets\n break if META_SEP =~ line\n end\n return if line.nil?\n tell\n\n ensure\n seek cur\n end", "def eof() end", "def eof() end", "def eof() end", "def end_with?(suffix)\n `if (self.lastIndexOf(suffix) == self.length - suffix.length) {\n return Qtrue;\n }\n\n return Qfalse;`\n end", "def end_with?(suffix)\n `if (self.lastIndexOf(suffix) == self.length - suffix.length) {\n return Qtrue;\n }\n\n return Qfalse;`\n end", "def eof\n end", "def test_file_must_contain_append_empty()\n\t\tCfruby::FileEdit.file_must_contain(@emptyfilename, \"new line\", :position => Cfruby::FileEdit::APPEND)\n\t\tFile.open(@emptyfilename, File::RDONLY) { |fp|\n\t\t\tlines = fp.readlines()\n\t\t\tassert_equal(\"new line\\n\", lines[-1])\n\t\t}\n\tend", "def last?\n position == bottom\n end", "def complete?\n buff.length < @maxlen\n end" ]
[ "0.80565715", "0.79875726", "0.79805964", "0.76524806", "0.7619679", "0.7619679", "0.7619679", "0.7590048", "0.7590048", "0.7590048", "0.7529541", "0.7505213", "0.7499178", "0.7473564", "0.7473564", "0.7473564", "0.74581635", "0.74108106", "0.73829067", "0.73551977", "0.73463655", "0.7323436", "0.7323436", "0.7323436", "0.7314971", "0.7280771", "0.72586703", "0.7195285", "0.7167634", "0.7166029", "0.71389997", "0.7130655", "0.7109719", "0.7084715", "0.70692486", "0.69897497", "0.6979407", "0.69728976", "0.69401205", "0.69401205", "0.6927844", "0.6908851", "0.69040895", "0.69010156", "0.6893137", "0.6893137", "0.6881983", "0.68681675", "0.6819923", "0.678399", "0.678399", "0.67788666", "0.6768181", "0.67414635", "0.67414635", "0.67414635", "0.6726432", "0.67194855", "0.67194855", "0.67194855", "0.67125136", "0.67125136", "0.67125136", "0.67125136", "0.67125136", "0.67035925", "0.6668872", "0.66607606", "0.6630395", "0.6586576", "0.6526042", "0.6425436", "0.6422263", "0.6420218", "0.64188725", "0.63857013", "0.63840073", "0.6372421", "0.6372421", "0.63567615", "0.63567615", "0.63553125", "0.6336309", "0.63362455", "0.62842256", "0.628045", "0.6268983", "0.62687504", "0.62098557", "0.62098557", "0.6206469", "0.62044126", "0.62044126", "0.62044126", "0.61589366", "0.61589366", "0.6115857", "0.6113554", "0.61135244", "0.60917705" ]
0.65727645
70
Check whether the next literal is strictly equal to _l_. White spaces and line terminators are skipped and ignored. If the next literal is not _l_, the position is not forwarded; if it is _l_, the position is forwarded.
def eql_lit?(l, hint = nil)
  lit = peek_lit(hint)
  if lit.eql? l
    fwd_after_peek
    lit
  else
    nil
  end
end
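A hedged usage sketch of the conditional-advance behavior the query describes: the cursor moves only on a strict eql? match. ToyLexer and its internals (a plain array cursor standing in for peek_lit/fwd_after_peek, with whitespace skipping omitted) are hypothetical, not the source's implementation.

class ToyLexer
  def initialize(tokens)
    @tokens = tokens
    @pos = 0
  end

  def peek_lit(_hint)
    @tokens[@pos]
  end

  def fwd_after_peek
    @pos += 1            # consume what was just peeked
  end

  # Forward the position only when the next literal is strictly equal to l.
  def eql_lit?(l, hint = nil)
    lit = peek_lit(hint)
    if lit.eql? l
      fwd_after_peek
      lit
    else
      nil
    end
  end
end

lexer = ToyLexer.new(%w[if x])
lexer.eql_lit?("while")  # => nil  (position unchanged)
lexer.eql_lit?("if")     # => "if" (position forwarded)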
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_lit_nolt?(l, hint = nil)\n lit = peek_lit_nolt(hint)\n if lit == l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def match_lit?(l, hint = nil)\n lit = peek_lit(hint)\n if lit == l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def is_lpar(latex, step)\n\tlatex[step+1..step+5].join == \"left(\"\nend", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def eql_lit_nolt?(l, hint = nil)\n lit = peek_lit_nolt(hint)\n if lit.eql? l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def _Le\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Eof)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_Le unless _tmp\n return _tmp\n end", "def _linear_white_space\n _save = self.pos\n\n _save1 = self.pos\n while true # sequence\n _save2 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save2\n end\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n if _tmp\n while true\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save4\n end\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_linear_white_space unless _tmp\n return _tmp\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. 
'7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 51 )\n\n\n\n type = NL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 230:5: ( '\\\\n' )+\n # at file 230:5: ( '\\\\n' )+\n match_count_8 = 0\n while true\n alt_8 = 2\n look_8_0 = @input.peek( 1 )\n\n if ( look_8_0 == 0xa )\n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 230:5: '\\\\n'\n match( 0xa )\n\n else\n match_count_8 > 0 and break\n eee = EarlyExit(8)\n\n\n raise eee\n end\n match_count_8 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 51 )\n\n\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? || @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? 
|| !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def with_lineno?(node); end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def printl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 14 )\n\n type = PRINTL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 316:10: 'printl'\n match( \"printl\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 14 )\n\n end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . )* '>>'\n match(\"<<\")\n # at line 500:9: ( . 
)*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def parse_nl\n s0 = @scanner.pos\n s2 = parse_newline\n if s2 == :failed\n s1 = :failed\n else\n s1 = []\n while s2 != :failed\n s1 << s2\n s2 = parse_newline\n end\n end\n if s1 == :failed\n @scanner.pos = s0\n :failed\n else\n s2 = []\n s3 = parse_skipline\n while s3 != :failed\n s2 << s3\n s3 = parse_skipline\n end\n [s1, s2]\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. 
'9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def is_rpar(latex, step)\n\tlatex[step+1..step+5].join == \"right)\"\nend", "def eat_eol()\n if eol_as_token # if eol is significant in the language...\n position = build_position(:lexeme)\n eol_lexeme = scanner.scan(eol_pattern) # Consume the eol text\n eol_token = [:T_EOL, RaccLexer::Token.new(eol_lexeme, eol_lexeme, position)]\n queue.unshift eol_token\n else\n scanner.scan(eol_pattern) # Consume the eol text\n end\n\n @lineno += 1\n @line_offset = scanner.pos()\n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _EmptyLine\n\n _save = self.pos\n while true # sequence\n _tmp = scan(/\\G(?-mix:^)/)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_Comment)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_EofComment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_EmptyLine unless _tmp\n return _tmp\n end", "def _rl_erase_at_end_of_line(l)\r\n _rl_backspace(l)\r\n @rl_outstream.write(' '*l)\r\n _rl_backspace(l)\r\n @_rl_last_c_pos -= l\r\n @visible_line[@_rl_last_c_pos,l] = 0.chr * l\r\n @rl_display_fixed = true if !@rl_display_fixed\r\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 27 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 161:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 27 )\n\n end", "def t__18!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n type = T__18\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 27:9: 'L'\n match( 0x4c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end", "def is_literal?\n if @operator && @operator.is_negation?\n @right_sentence.is_literal?\n else\n @left_sentence.nil? 
&& @right_sentence.nil?\n end\n end", "def lmatch(a, lchar, rchar)\n\n token = []\n c = a.first\n token << c until (c = a.shift; c == lchar or c == rchar or a.empty?)\n token << c\n\n if c == lchar then\n found, tokenx, remainderx = rmatch(a, lchar, rchar)\n c = found\n token << tokenx\n remainder = remainderx\n else\n remainder = a.join\n end\n\n [c, token.join, remainder]\n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def leq!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n\n type = LEQ\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 154:7: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 32:5: '<'\n match( 0x3c )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n\n end", "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n @pos = pos0\n lit\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 382:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def inc_l\n end", "def literal?(node); end", "def lte!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n\n\n type = LTE\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 33:6: '<='\n match( \"<=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n\n end", "def jump_to_line l\n l = l.clamp 0, num_lines - 1\n return if @topline == l\n @topline = l\n @botline = [l + buffer.content_height, num_lines].min\n buffer.mark_dirty!\n end", "def next_l\n raise \"implement in subclass\"\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lshift!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LSHIFT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 164:10: '<<'\n match( \"<<\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n 
[true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def le!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 57 )\n\n type = LE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 384:6: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 57 )\n\n end", "def t__31!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 2 )\n\n\n\n type = T__31\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 8:9: '!='\n match( \"!=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 2 )\n\n\n end", "def parse_line(line)\n # If the previous line didn't and a logical line, we're not going to start one. If it did,\n # we're indeed going to start a new logical line\n @state[:ll_start] = @state[:ll_end]\n\n # We will start with the assumption that we're going to end the current logical line. We may layer\n # find out that we did not, in fact, do so.\n @state[:ll_end] = true\n\n # Reset the line continuator flag the the last line may have set to true\n @state[:line_continuator] = false\n\n # Find the first non-(space/tab) character\n index = 0\n while index < line.length && [\" \", \"\\t\"].include?(line[index])\n index += 1\n end\n @state[:indent_string] = line[0...index]\n\n # Iterate over the line's characters as long as there are any. We use different iteration\n # methods depending on whether we're inside a string or not\n index = 0\n while index < line.length\n if @state[:in_string].nil?\n index = parse_characters_normal(line, index)\n else\n index = parse_characters_in_string(line, index)\n end\n end\n\n # We have reached the end of the line. Decide whether or not the logical line ends here.\n @state[:ll_end] = @state[:in_string].nil? && @state[:open_braces] == 0 && !@state[:line_continuator]\n end", "def fresh_line?\n @content.empty? 
|| @content[-1].eql?(NL)\n end", "def handled_labeled_list(line, level, margin, offset, prefix)\n prefix_length = prefix.length\n text = line.text\n flag = nil\n\n case prefix\n when /^\\[/ then\n flag = :LABELED\n prefix = prefix[1, prefix.length-2]\n when /:$/ then\n flag = :NOTE\n prefix.chop!\n else\n raise \"Invalid List Type: #{self.inspect}\"\n end\n\n # body is on the next line\n if text.length <= offset then\n original_line = line\n line = @lines.next\n return false unless line\n text = line.text\n\n for i in 0..margin\n if text[i] != SPACE\n @lines.unget\n return false\n end\n end\n\n i = margin\n i += 1 while text[i] == SPACE\n\n if i >= text.length then\n @lines.unget\n return false\n else\n offset = i\n prefix_length = 0\n\n if text[offset..-1] =~ SIMPLE_LIST_RE then\n @lines.unget\n line = original_line\n line.text = ''\n else\n @lines.delete original_line\n end\n end\n end\n\n line.stamp :LIST, level+1, prefix, flag\n text[margin, prefix_length] = \" \" * prefix_length\n assign_types_to_lines(offset, level + 1)\n return true\n end", "def lpar!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 5 )\n\n\n\n type = LPAR\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 273:4: '('\n match( 0x28 )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 5 )\n\n\n end", "def skip_to_eoln\r\n @source.get until @source.eoln?\r\n true\r\n end", "def nextLine\r\n\t\twhile (@allLines[0] == \"\" || @allLines[0] == \"\\r\\n\" || @allLines[0] == \"\\n\")\r\n\t\t\[email protected]\r\n\t\tend\r\n\t\tif(@allLines[0]!=nil)\r\n\t\t\t@Line = @allLines[0]\r\n\t\t\[email protected]\r\n\t\t\tcheckSpace\r\n\t\tend\r\n\tend", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? 
&& new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def llader!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 8 )\n\n\n\n type = LLADER\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 285:4: '}'\n match( 0x7d )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 8 )\n\n\n end", "def opening_brace_on_same_line?(node); end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def lshift_asgn!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 46 )\n\n type = LSHIFT_ASGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 167:15: '<<='\n match( \"<<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 46 )\n\n end", "def t__19!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__19\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 39 )\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 138:6: ( '\\\\r' )? '\\\\n'\n # at line 138:6: ( '\\\\r' )?\n alt_2 = 2\n look_2_0 = @input.peek( 1 )\n\n if ( look_2_0 == 0xd )\n alt_2 = 1\n end\n case alt_2\n when 1\n # at line 138:6: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 39 )\n\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def skip_white_space_or_to_eoln\r\n while (next_char = @source.get)\r\n return next_char if (next_char > ' ') || @source.eoln?\r\n end\r\n end", "def fwd_lit(hint)\n while lit = next_input_element(hint) and (lit.ws? 
or lit.lt?)\n end\n lit\n end", "def same_line_bracket_block?(result, iter, call_sexp)\n call_sexp.shift # discard the sexp_type, as the processor would\n syntactic = !Call.new(processor).arguments?(call_sexp) || iter.end_with?(\")\")\n stylistic = result !~ /\\n/ && result.size < LINE_LENGTH\n syntactic && stylistic\n end", "def lbracket!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LBRACKET\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 179:12: '('\n match( 0x28 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? /^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def process_new_line\n @status.line += 1\n\n @skip_next = true if ((@cur_char == \"\\n\" && @next_char == \"\\r\") ||\n (@cur_char == \"\\r\" && @next_char == \"\\n\"))\n end", "def lineCheck (l1, l2)\n if(l1 == l2)\n true;\n else\n false;\n end\nend", "def lex(input)\n line = 1\n offset = 0\n ending = input.length\n\n until offset == ending do\n next_token(input, offset, line).tap do |token|\n raise UnconsumedInputError,\n \"Unmatched input #{input[offset..-1].inspect} on line #{line}\" if token.nil?\n\n token[:offset] = offset\n line, token[:line] = token[:line], line\n offset += token[:value].length\n yield token unless token[:discarded]\n end\n end\n\n yield ({ :name => :$end, :line => line, :value => nil, :offset => offset })\n end", "def t__23!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__23\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def correct_for_literals(lineno, column)\n tstring_index = @lex.index do |pos, token|\n pos[0] == lineno and pos[1] == column and\n token == :on_tstring_content\n end\n\n tstring_index ? @lex[tstring_index -1][0][1] : column\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 354:6: ( '\\\\r' )? 
'\\\\n'\n # at line 354:6: ( '\\\\r' )?\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0 == 0xd )\n alt_6 = 1\n end\n case alt_6\n when 1\n # at line 354:6: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? 
statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = @adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, 
tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = @adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def indent_line\n result = false\n level = calculate_indentation\n return result if level.nil? 
|| level < 0\n @buffer.save_excursion do\n @buffer.beginning_of_line\n @buffer.composite_edit do\n if @buffer.looking_at?(/[ \\t]+/)\n s = @buffer.match_string(0)\n break if /\\t/ !~ s && s.size == level\n @buffer.delete_region(@buffer.match_beginning(0),\n @buffer.match_end(0))\n else\n break if level == 0\n end\n @buffer.indent_to(level)\n end\n result = true\n end\n pos = @buffer.point\n @buffer.beginning_of_line\n @buffer.forward_char while /[ \\t]/ =~ @buffer.char_after\n if @buffer.point < pos\n @buffer.goto_char(pos)\n end\n result\n end", "def ignorable_nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 106 )\n\n \n # - - - - main rule block - - - -\n # at line 948:3: ( '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )? | '/*' ( . )* '*/' | ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+ )\n alt_50 = 3\n look_50_0 = @input.peek( 1 )\n\n if ( look_50_0 == 0x2f )\n look_50_1 = @input.peek( 2 )\n\n if ( look_50_1 == 0x2f )\n alt_50 = 1\n elsif ( look_50_1 == 0x2a )\n alt_50 = 2\n else\n raise NoViableAlternative( \"\", 50, 1 )\n end\n elsif ( look_50_0.between?( 0x9, 0xa ) || look_50_0.between?( 0xc, 0xd ) || look_50_0 == 0x20 || look_50_0 == 0xa0 )\n alt_50 = 3\n else\n raise NoViableAlternative( \"\", 50, 0 )\n end\n case alt_50\n when 1\n # at line 948:5: '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )?\n match( \"//\" )\n # at line 948:10: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 45\n alt_45 = 2\n look_45_0 = @input.peek( 1 )\n\n if ( look_45_0.between?( 0x0, 0x9 ) || look_45_0.between?( 0xb, 0xc ) || look_45_0.between?( 0xe, 0xffff ) )\n alt_45 = 1\n\n end\n case alt_45\n when 1\n # at line 948:10: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 45\n end\n end # loop for decision 45\n # at line 948:28: ( ( '\\\\r' )? '\\\\n' )?\n alt_47 = 2\n look_47_0 = @input.peek( 1 )\n\n if ( look_47_0 == 0xa || look_47_0 == 0xd )\n alt_47 = 1\n end\n case alt_47\n when 1\n # at line 948:30: ( '\\\\r' )? '\\\\n'\n # at line 948:30: ( '\\\\r' )?\n alt_46 = 2\n look_46_0 = @input.peek( 1 )\n\n if ( look_46_0 == 0xd )\n alt_46 = 1\n end\n case alt_46\n when 1\n # at line 948:30: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n end\n\n when 2\n # at line 949:5: '/*' ( . )* '*/'\n match( \"/*\" )\n # at line 949:10: ( . 
)*\n while true # decision 48\n alt_48 = 2\n look_48_0 = @input.peek( 1 )\n\n if ( look_48_0 == 0x2a )\n look_48_1 = @input.peek( 2 )\n\n if ( look_48_1 == 0x2f )\n alt_48 = 2\n elsif ( look_48_1.between?( 0x0, 0x2e ) || look_48_1.between?( 0x30, 0xffff ) )\n alt_48 = 1\n\n end\n elsif ( look_48_0.between?( 0x0, 0x29 ) || look_48_0.between?( 0x2b, 0xffff ) )\n alt_48 = 1\n\n end\n case alt_48\n when 1\n # at line 949:10: .\n match_any\n\n else\n break # out of loop for decision 48\n end\n end # loop for decision 48\n match( \"*/\" )\n\n when 3\n # at line 950:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+\n # at file 950:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+\n match_count_49 = 0\n while true\n alt_49 = 2\n look_49_0 = @input.peek( 1 )\n\n if ( look_49_0.between?( 0x9, 0xa ) || look_49_0.between?( 0xc, 0xd ) || look_49_0 == 0x20 || look_49_0 == 0xa0 )\n alt_49 = 1\n\n end\n case alt_49\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x9, 0xa ) || @input.peek( 1 ).between?( 0xc, 0xd ) || @input.peek(1) == 0x20 || @input.peek(1) == 0xa0\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n match_count_49 > 0 and break\n eee = EarlyExit(49)\n\n\n raise eee\n end\n match_count_49 += 1\n end\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 106 )\n\n end" ]
[ "0.61675394", "0.6134503", "0.60336643", "0.58739287", "0.5837285", "0.5837285", "0.5837285", "0.5816137", "0.5600111", "0.5600111", "0.5600111", "0.5549727", "0.55136937", "0.5398834", "0.5327225", "0.5316233", "0.5312511", "0.5312511", "0.5312511", "0.5294847", "0.5272985", "0.5226127", "0.5205091", "0.51896554", "0.51664954", "0.51616466", "0.51614594", "0.51523435", "0.51456803", "0.5098138", "0.5098138", "0.5098138", "0.5092082", "0.50892085", "0.50820184", "0.50819373", "0.50819373", "0.50819373", "0.50819373", "0.50701165", "0.5064002", "0.5061482", "0.5059191", "0.50489354", "0.5028081", "0.50175107", "0.50175107", "0.50175107", "0.50175107", "0.5010826", "0.5005159", "0.49986437", "0.49893826", "0.4985163", "0.49813336", "0.4980485", "0.4974699", "0.49656504", "0.49636096", "0.49633247", "0.49633247", "0.49633247", "0.49553442", "0.49482608", "0.49482608", "0.49482608", "0.49437547", "0.49403", "0.49328545", "0.4916709", "0.49166512", "0.49069482", "0.49060294", "0.48980394", "0.48973313", "0.48948857", "0.48794326", "0.48659828", "0.48605078", "0.48514512", "0.48482415", "0.48377278", "0.4832635", "0.4830863", "0.4825154", "0.48201302", "0.48192206", "0.48075196", "0.48071724", "0.480441", "0.4794528", "0.47925073", "0.4790558", "0.4785854", "0.47787586", "0.47787586", "0.47787586", "0.47783923", "0.47743344", "0.4773143" ]
0.584392
4
check next literal is strictly equal to _l_ or not. white spaces are skipped and ignored. line terminators are not ignored. if next literal is not _l_, position is not forwarded if next literal is _l_, position is forwarded
def eql_lit_nolt?(l, hint = nil) lit = peek_lit_nolt(hint) if lit.eql? l fwd_after_peek lit else nil end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_lit_nolt?(l, hint = nil)\n lit = peek_lit_nolt(hint)\n if lit == l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def match_lit?(l, hint = nil)\n lit = peek_lit(hint)\n if lit == l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def is_lpar(latex, step)\n\tlatex[step+1..step+5].join == \"left(\"\nend", "def eql_lit?(l, hint = nil)\n lit = peek_lit(hint)\n if lit.eql? l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def _Le\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Eof)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_Le unless _tmp\n return _tmp\n end", "def _linear_white_space\n _save = self.pos\n\n _save1 = self.pos\n while true # sequence\n _save2 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save2\n end\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n if _tmp\n while true\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save4\n end\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_linear_white_space unless _tmp\n return _tmp\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. 
'7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? || @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? 
|| !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 51 )\n\n\n\n type = NL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 230:5: ( '\\\\n' )+\n # at file 230:5: ( '\\\\n' )+\n match_count_8 = 0\n while true\n alt_8 = 2\n look_8_0 = @input.peek( 1 )\n\n if ( look_8_0 == 0xa )\n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 230:5: '\\\\n'\n match( 0xa )\n\n else\n match_count_8 > 0 and break\n eee = EarlyExit(8)\n\n\n raise eee\n end\n match_count_8 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 51 )\n\n\n end", "def with_lineno?(node); end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def parse_nl\n s0 = @scanner.pos\n s2 = parse_newline\n if s2 == :failed\n s1 = :failed\n else\n s1 = []\n while s2 != :failed\n s1 << s2\n s2 = parse_newline\n end\n end\n if s1 == :failed\n @scanner.pos = s0\n :failed\n else\n s2 = []\n s3 = parse_skipline\n while s3 != :failed\n s2 << s3\n s3 = parse_skipline\n end\n [s1, s2]\n end\n end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . )* '>>'\n match(\"<<\")\n # at line 500:9: ( . 
)*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def is_rpar(latex, step)\n\tlatex[step+1..step+5].join == \"right)\"\nend", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. 
'9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def printl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 14 )\n\n type = PRINTL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 316:10: 'printl'\n match( \"printl\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 14 )\n\n end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def _EmptyLine\n\n _save = self.pos\n while true # sequence\n _tmp = scan(/\\G(?-mix:^)/)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_Comment)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_EofComment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_EmptyLine unless _tmp\n return _tmp\n end", "def leq!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n\n type = LEQ\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 154:7: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end", "def t__18!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n type = T__18\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 27:9: 'L'\n match( 0x4c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end", "def lmatch(a, lchar, rchar)\n\n token = []\n c = a.first\n token << c until (c = a.shift; c == lchar or c == rchar or a.empty?)\n token << c\n\n if c == lchar then\n found, tokenx, remainderx = rmatch(a, lchar, rchar)\n c = found\n token << tokenx\n remainder = remainderx\n else\n remainder = a.join\n end\n\n [c, token.join, remainder]\n end", "def is_literal?\n if @operator && @operator.is_negation?\n @right_sentence.is_literal?\n else\n @left_sentence.nil? 
&& @right_sentence.nil?\n end\n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 27 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 161:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 27 )\n\n end", "def jump_to_line l\n l = l.clamp 0, num_lines - 1\n return if @topline == l\n @topline = l\n @botline = [l + buffer.content_height, num_lines].min\n buffer.mark_dirty!\n end", "def parse_line(line)\n # If the previous line didn't and a logical line, we're not going to start one. If it did,\n # we're indeed going to start a new logical line\n @state[:ll_start] = @state[:ll_end]\n\n # We will start with the assumption that we're going to end the current logical line. We may layer\n # find out that we did not, in fact, do so.\n @state[:ll_end] = true\n\n # Reset the line continuator flag the the last line may have set to true\n @state[:line_continuator] = false\n\n # Find the first non-(space/tab) character\n index = 0\n while index < line.length && [\" \", \"\\t\"].include?(line[index])\n index += 1\n end\n @state[:indent_string] = line[0...index]\n\n # Iterate over the line's characters as long as there are any. We use different iteration\n # methods depending on whether we're inside a string or not\n index = 0\n while index < line.length\n if @state[:in_string].nil?\n index = parse_characters_normal(line, index)\n else\n index = parse_characters_in_string(line, index)\n end\n end\n\n # We have reached the end of the line. Decide whether or not the logical line ends here.\n @state[:ll_end] = @state[:in_string].nil? && @state[:open_braces] == 0 && !@state[:line_continuator]\n end", "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? 
or lit.lt?)\n end\n @pos = pos0\n lit\n end", "def literal?(node); end", "def _rl_erase_at_end_of_line(l)\r\n _rl_backspace(l)\r\n @rl_outstream.write(' '*l)\r\n _rl_backspace(l)\r\n @_rl_last_c_pos -= l\r\n @visible_line[@_rl_last_c_pos,l] = 0.chr * l\r\n @rl_display_fixed = true if !@rl_display_fixed\r\n end", "def lineCheck (l1, l2)\n if(l1 == l2)\n true;\n else\n false;\n end\nend", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def eat_eol()\n if eol_as_token # if eol is significant in the language...\n position = build_position(:lexeme)\n eol_lexeme = scanner.scan(eol_pattern) # Consume the eol text\n eol_token = [:T_EOL, RaccLexer::Token.new(eol_lexeme, eol_lexeme, position)]\n queue.unshift eol_token\n else\n scanner.scan(eol_pattern) # Consume the eol text\n end\n\n @lineno += 1\n @line_offset = scanner.pos()\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 32:5: '<'\n match( 0x3c )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n\n end", "def lte!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n\n\n type = LTE\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 33:6: '<='\n match( \"<=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n\n end", "def inc_l\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 382:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def nextLine\r\n\t\twhile (@allLines[0] == \"\" || @allLines[0] == \"\\r\\n\" || @allLines[0] == \"\\n\")\r\n\t\t\[email protected]\r\n\t\tend\r\n\t\tif(@allLines[0]!=nil)\r\n\t\t\t@Line = @allLines[0]\r\n\t\t\[email protected]\r\n\t\t\tcheckSpace\r\n\t\tend\r\n\tend", "def fresh_line?\n @content.empty? 
|| @content[-1].eql?(NL)\n end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def le!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 57 )\n\n type = LE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 384:6: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 57 )\n\n end", "def handled_labeled_list(line, level, margin, offset, prefix)\n prefix_length = prefix.length\n text = line.text\n flag = nil\n\n case prefix\n when /^\\[/ then\n flag = :LABELED\n prefix = prefix[1, prefix.length-2]\n when /:$/ then\n flag = :NOTE\n prefix.chop!\n else\n raise \"Invalid List Type: #{self.inspect}\"\n end\n\n # body is on the next line\n if text.length <= offset then\n original_line = line\n line = @lines.next\n return false unless line\n text = line.text\n\n for i in 0..margin\n if text[i] != SPACE\n @lines.unget\n return false\n end\n end\n\n i = margin\n i += 1 while text[i] == SPACE\n\n if i >= text.length then\n @lines.unget\n return false\n else\n offset = i\n prefix_length = 0\n\n if text[offset..-1] =~ SIMPLE_LIST_RE then\n @lines.unget\n line = original_line\n line.text = ''\n else\n @lines.delete original_line\n end\n end\n end\n\n line.stamp :LIST, level+1, prefix, flag\n text[margin, prefix_length] = \" \" * prefix_length\n assign_types_to_lines(offset, level + 1)\n return true\n end", "def t__31!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 2 )\n\n\n\n type = T__31\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 8:9: '!='\n match( \"!=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 2 )\n\n\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def next_l\n raise \"implement in subclass\"\n end", "def lshift!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LSHIFT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 164:10: '<<'\n match( \"<<\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def opening_brace_on_same_line?(node); end", "def same_line_bracket_block?(result, iter, call_sexp)\n call_sexp.shift # discard the sexp_type, as the processor would\n syntactic = !Call.new(processor).arguments?(call_sexp) || iter.end_with?(\")\")\n stylistic = result !~ /\\n/ && result.size < LINE_LENGTH\n syntactic && stylistic\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def skip_to_eoln\r\n @source.get until @source.eoln?\r\n true\r\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lpar!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 5 )\n\n\n\n type = LPAR\n channel = 
ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 273:4: '('\n match( 0x28 )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 5 )\n\n\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def fwd_lit(hint)\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n lit\n end", "def until_\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 41 )\n until__start_index = @input.index\n\n success = false # flag used for memoization\n\n begin\n # rule memoization\n if @state.backtracking > 0 and already_parsed_rule?( __method__ )\n success = true\n return \n end\n # at line 326:9: ( 'U' | 'u' ) ( 'N' | 'n' ) ( 'T' | 't' ) ( 'I' | 'i' ) ( 'L' | 'l' )\n if @input.peek( 1 ).between?( T__14, T__15 )\n @input.consume\n @state.error_recovery = false\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n if @input.peek( 1 ).between?( T__34, T__35 )\n @input.consume\n @state.error_recovery = false\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n if @input.peek( 1 ).between?( T__16, T__17 )\n @input.consume\n @state.error_recovery = false\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n if @input.peek( 1 ).between?( T__24, T__25 )\n @input.consume\n @state.error_recovery = false\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n if @input.peek( 1 ).between?( T__22, T__23 )\n @input.consume\n @state.error_recovery = false\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n\n success = true\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 41 )\n memoize( __method__, until__start_index, success ) if @state.backtracking > 0\n\n end\n \n return \n end", "def is_same_line on_line, off_line\n\tif on_line == off_line\n\t\ttrue\n\telse\n\t\tfalse\n\tend\nend", "def lshift_asgn!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 46 )\n\n type = LSHIFT_ASGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 167:15: '<<='\n match( \"<<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next 
line to manually enable rule tracing\n # trace_out( __method__, 46 )\n\n end", "def correct_for_literals(lineno, column)\n tstring_index = @lex.index do |pos, token|\n pos[0] == lineno and pos[1] == column and\n token == :on_tstring_content\n end\n\n tstring_index ? @lex[tstring_index -1][0][1] : column\n end", "def llader!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 8 )\n\n\n\n type = LLADER\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 285:4: '}'\n match( 0x7d )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 8 )\n\n\n end", "def t__19!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__19\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def node_on_single_line?(node)\n return if node.source_range.start_pos.line != node.source_range.end_pos.line\n\n # The Sass parser reports an incorrect source range if the trailing curly\n # brace is on the next line, e.g.\n #\n # p {\n # }\n #\n # Since we don't want to count this as a single line node, check if the\n # last character on the first line is an opening curly brace.\n engine.lines[node.line - 1].strip[-1] != '{'\n end", "def ignorable_nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 106 )\n\n \n # - - - - main rule block - - - -\n # at line 948:3: ( '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )? | '/*' ( . )* '*/' | ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+ )\n alt_50 = 3\n look_50_0 = @input.peek( 1 )\n\n if ( look_50_0 == 0x2f )\n look_50_1 = @input.peek( 2 )\n\n if ( look_50_1 == 0x2f )\n alt_50 = 1\n elsif ( look_50_1 == 0x2a )\n alt_50 = 2\n else\n raise NoViableAlternative( \"\", 50, 1 )\n end\n elsif ( look_50_0.between?( 0x9, 0xa ) || look_50_0.between?( 0xc, 0xd ) || look_50_0 == 0x20 || look_50_0 == 0xa0 )\n alt_50 = 3\n else\n raise NoViableAlternative( \"\", 50, 0 )\n end\n case alt_50\n when 1\n # at line 948:5: '//' (~ ( '\\\\n' | '\\\\r' ) )* ( ( '\\\\r' )? '\\\\n' )?\n match( \"//\" )\n # at line 948:10: (~ ( '\\\\n' | '\\\\r' ) )*\n while true # decision 45\n alt_45 = 2\n look_45_0 = @input.peek( 1 )\n\n if ( look_45_0.between?( 0x0, 0x9 ) || look_45_0.between?( 0xb, 0xc ) || look_45_0.between?( 0xe, 0xffff ) )\n alt_45 = 1\n\n end\n case alt_45\n when 1\n # at line 948:10: ~ ( '\\\\n' | '\\\\r' )\n if @input.peek( 1 ).between?( 0x0, 0x9 ) || @input.peek( 1 ).between?( 0xb, 0xc ) || @input.peek( 1 ).between?( 0xe, 0xff )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n break # out of loop for decision 45\n end\n end # loop for decision 45\n # at line 948:28: ( ( '\\\\r' )? '\\\\n' )?\n alt_47 = 2\n look_47_0 = @input.peek( 1 )\n\n if ( look_47_0 == 0xa || look_47_0 == 0xd )\n alt_47 = 1\n end\n case alt_47\n when 1\n # at line 948:30: ( '\\\\r' )? 
'\\\\n'\n # at line 948:30: ( '\\\\r' )?\n alt_46 = 2\n look_46_0 = @input.peek( 1 )\n\n if ( look_46_0 == 0xd )\n alt_46 = 1\n end\n case alt_46\n when 1\n # at line 948:30: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n end\n\n when 2\n # at line 949:5: '/*' ( . )* '*/'\n match( \"/*\" )\n # at line 949:10: ( . )*\n while true # decision 48\n alt_48 = 2\n look_48_0 = @input.peek( 1 )\n\n if ( look_48_0 == 0x2a )\n look_48_1 = @input.peek( 2 )\n\n if ( look_48_1 == 0x2f )\n alt_48 = 2\n elsif ( look_48_1.between?( 0x0, 0x2e ) || look_48_1.between?( 0x30, 0xffff ) )\n alt_48 = 1\n\n end\n elsif ( look_48_0.between?( 0x0, 0x29 ) || look_48_0.between?( 0x2b, 0xffff ) )\n alt_48 = 1\n\n end\n case alt_48\n when 1\n # at line 949:10: .\n match_any\n\n else\n break # out of loop for decision 48\n end\n end # loop for decision 48\n match( \"*/\" )\n\n when 3\n # at line 950:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+\n # at file 950:5: ( '\\\\t' | '\\\\f' | ' ' | '\\\\u00A0' | '\\\\n' | '\\\\r' )+\n match_count_49 = 0\n while true\n alt_49 = 2\n look_49_0 = @input.peek( 1 )\n\n if ( look_49_0.between?( 0x9, 0xa ) || look_49_0.between?( 0xc, 0xd ) || look_49_0 == 0x20 || look_49_0 == 0xa0 )\n alt_49 = 1\n\n end\n case alt_49\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x9, 0xa ) || @input.peek( 1 ).between?( 0xc, 0xd ) || @input.peek(1) == 0x20 || @input.peek(1) == 0xa0\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n end\n\n\n\n else\n match_count_49 > 0 and break\n eee = EarlyExit(49)\n\n\n raise eee\n end\n match_count_49 += 1\n end\n\n\n end\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 106 )\n\n end", "def indent_line\n result = false\n level = calculate_indentation\n return result if level.nil? 
|| level < 0\n @buffer.save_excursion do\n @buffer.beginning_of_line\n @buffer.composite_edit do\n if @buffer.looking_at?(/[ \\t]+/)\n s = @buffer.match_string(0)\n break if /\\t/ !~ s && s.size == level\n @buffer.delete_region(@buffer.match_beginning(0),\n @buffer.match_end(0))\n else\n break if level == 0\n end\n @buffer.indent_to(level)\n end\n result = true\n end\n pos = @buffer.point\n @buffer.beginning_of_line\n @buffer.forward_char while /[ \\t]/ =~ @buffer.char_after\n if @buffer.point < pos\n @buffer.goto_char(pos)\n end\n result\n end", "def process_new_line\n @status.line += 1\n\n @skip_next = true if ((@cur_char == \"\\n\" && @next_char == \"\\r\") ||\n (@cur_char == \"\\r\" && @next_char == \"\\n\"))\n end", "def check_lbl_2(linen, align)\n case linen\n when 1, 2, 3, 4\n @lineali[linen-1] = align\n end\n retme = \"\"\n return retme\n end", "def lex(input)\n line = 1\n offset = 0\n ending = input.length\n\n until offset == ending do\n next_token(input, offset, line).tap do |token|\n raise UnconsumedInputError,\n \"Unmatched input #{input[offset..-1].inspect} on line #{line}\" if token.nil?\n\n token[:offset] = offset\n line, token[:line] = token[:line], line\n offset += token[:value].length\n yield token unless token[:discarded]\n end\n end\n\n yield ({ :name => :$end, :line => line, :value => nil, :offset => offset })\n end", "def next_match char\n data = get_content\n row = focussed_index + 1\n row.upto(data.length-1) do |ix|\n val = data[ix].chomp rescue return # 2010-01-05 15:28 crashed on trueclass\n #if val[0,1] == char #and val != currval\n if val[0,1].casecmp(char) == 0 #AND VAL != CURRval\n return ix\n end\n end\n row = focussed_index - 1\n 0.upto(row) do |ix|\n val = data[ix].chomp\n #if val[0,1] == char #and val != currval\n if val[0,1].casecmp(char) == 0 #and val != currval\n return ix\n end\n end\n return -1\n end", "def skip_white_space_or_to_eoln\r\n while (next_char = @source.get)\r\n return next_char if (next_char > ' ') || @source.eoln?\r\n end\r\n end", "def t__23!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__23\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def t__33!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 4 )\n\n\n\n type = T__33\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 10:9: '&&'\n match( \"&&\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 4 )\n\n\n end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def new_line?\n ary = insertion_point\n ary.empty? || ary.all? {|stmt| stmt.type == :code }\n end" ]
[ "0.622008", "0.61925745", "0.606427", "0.5912112", "0.58234286", "0.58234286", "0.58234286", "0.57885873", "0.55616665", "0.55616665", "0.55616665", "0.55249256", "0.5514974", "0.53779286", "0.5363948", "0.533078", "0.53150797", "0.53150797", "0.53150797", "0.52767444", "0.52493024", "0.524566", "0.52450293", "0.5193093", "0.5175581", "0.5168301", "0.5148423", "0.51328796", "0.51298404", "0.51233846", "0.51092035", "0.5094366", "0.5094366", "0.5094366", "0.5074922", "0.5048564", "0.5043503", "0.50408703", "0.5040429", "0.50393945", "0.50393945", "0.50393945", "0.50393945", "0.5034985", "0.5011443", "0.5011288", "0.500367", "0.50022155", "0.50004756", "0.49829832", "0.49753672", "0.49753672", "0.49753672", "0.49753672", "0.49748963", "0.497469", "0.496842", "0.49657935", "0.4965515", "0.49553752", "0.49430537", "0.49384126", "0.49382666", "0.4935966", "0.49329388", "0.49327007", "0.49327007", "0.49327007", "0.49264008", "0.4925163", "0.49251434", "0.49056938", "0.489528", "0.48820287", "0.48795864", "0.48795864", "0.48795864", "0.48666447", "0.48605067", "0.48551008", "0.48404685", "0.48363084", "0.48304915", "0.48270214", "0.48244408", "0.48230845", "0.48143718", "0.48116595", "0.4811008", "0.48091984", "0.48073363", "0.480653", "0.4802457", "0.47976315", "0.47901028", "0.47897935", "0.47882962", "0.47882962", "0.47882962", "0.47805947" ]
0.58858633
4
check whether the next literal is equal to _l_. white spaces and line terminators are skipped and ignored. if the next literal is not _l_, the position is not forwarded; if it is _l_, the position is forwarded (see the sketch after this row).
def match_lit?(l, hint = nil)\n      lit = peek_lit(hint)\n      if lit == l\n        fwd_after_peek\n        lit\n      else\n        nil\n      end\n    end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_lit_nolt?(l, hint = nil)\n lit = peek_lit_nolt(hint)\n if lit == l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def is_lpar(latex, step)\n\tlatex[step+1..step+5].join == \"left(\"\nend", "def eql_lit?(l, hint = nil)\n lit = peek_lit(hint)\n if lit.eql? l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def eql_lit_nolt?(l, hint = nil)\n lit = peek_lit_nolt(hint)\n if lit.eql? l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def _linear_white_space\n _save = self.pos\n\n _save1 = self.pos\n while true # sequence\n _save2 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save2\n end\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n if _tmp\n while true\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save4\n end\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_linear_white_space unless _tmp\n return _tmp\n end", "def nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 51 )\n\n\n\n type = NL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 230:5: ( '\\\\n' )+\n # at file 230:5: ( '\\\\n' )+\n match_count_8 = 0\n while true\n alt_8 = 2\n look_8_0 = @input.peek( 1 )\n\n if ( look_8_0 == 0xa )\n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 230:5: '\\\\n'\n match( 0xa )\n\n else\n match_count_8 > 0 and break\n eee = EarlyExit(8)\n\n\n raise eee\n end\n match_count_8 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 51 )\n\n\n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def _Le\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Eof)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_Le unless _tmp\n return _tmp\n end", "def printl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 14 )\n\n type = PRINTL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 316:10: 'printl'\n match( \"printl\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 14 )\n\n end", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n 
position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. '7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? 
|| @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? || !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def with_lineno?(node); end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def parse_nl\n s0 = @scanner.pos\n s2 = parse_newline\n if s2 == :failed\n s1 = :failed\n else\n s1 = []\n while s2 != :failed\n s1 << s2\n s2 = parse_newline\n end\n end\n if s1 == :failed\n @scanner.pos = s0\n :failed\n else\n s2 = []\n s3 = parse_skipline\n while s3 != :failed\n s2 << s3\n s3 = parse_skipline\n end\n [s1, s2]\n end\n end", "def eat_eol()\n if eol_as_token # if eol is significant in the language...\n position = build_position(:lexeme)\n eol_lexeme = scanner.scan(eol_pattern) # Consume the eol text\n eol_token = [:T_EOL, RaccLexer::Token.new(eol_lexeme, eol_lexeme, position)]\n queue.unshift eol_token\n else\n scanner.scan(eol_pattern) # Consume the eol text\n end\n\n @lineno += 1\n @line_offset = scanner.pos()\n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; 
end", "def t__18!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n type = T__18\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 27:9: 'L'\n match( 0x4c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end", "def lpar!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 5 )\n\n\n\n type = LPAR\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 273:4: '('\n match( 0x28 )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 5 )\n\n\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lmatch(a, lchar, rchar)\n\n token = []\n c = a.first\n token << c until (c = a.shift; c == lchar or c == rchar or a.empty?)\n token << c\n\n if c == lchar then\n found, tokenx, remainderx = rmatch(a, lchar, rchar)\n c = found\n token << tokenx\n remainder = remainderx\n else\n remainder = a.join\n end\n\n [c, token.join, remainder]\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 32:5: '<'\n match( 0x3c )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 27 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 161:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 27 )\n\n end", "def llader!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 8 )\n\n\n\n type = LLADER\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 285:4: '}'\n match( 0x7d )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 8 )\n\n\n end", "def inc_l\n end", "def handled_labeled_list(line, level, margin, offset, prefix)\n prefix_length = prefix.length\n text = line.text\n flag = nil\n\n case prefix\n when /^\\[/ then\n flag = :LABELED\n prefix = prefix[1, prefix.length-2]\n when /:$/ then\n flag = :NOTE\n prefix.chop!\n else\n raise \"Invalid List Type: #{self.inspect}\"\n end\n\n # body is on the next line\n if text.length <= offset then\n original_line = line\n line = @lines.next\n return false unless line\n text = line.text\n\n for i in 0..margin\n if text[i] != SPACE\n @lines.unget\n return false\n end\n end\n\n i = margin\n i += 1 while text[i] == SPACE\n\n if i >= text.length then\n @lines.unget\n return false\n else\n offset = i\n prefix_length = 0\n\n if text[offset..-1] =~ SIMPLE_LIST_RE then\n @lines.unget\n line = original_line\n line.text = ''\n else\n @lines.delete original_line\n end\n end\n end\n\n line.stamp :LIST, level+1, prefix, flag\n text[margin, prefix_length] = \" 
\" * prefix_length\n assign_types_to_lines(offset, level + 1)\n return true\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . )* '>>'\n match(\"<<\")\n # at line 500:9: ( . )*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def _rl_erase_at_end_of_line(l)\r\n _rl_backspace(l)\r\n @rl_outstream.write(' '*l)\r\n _rl_backspace(l)\r\n @_rl_last_c_pos -= l\r\n @visible_line[@_rl_last_c_pos,l] = 0.chr * l\r\n @rl_display_fixed = true if !@rl_display_fixed\r\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 382:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def parse_line(line)\n # If the previous line didn't and a logical line, we're not going to start one. If it did,\n # we're indeed going to start a new logical line\n @state[:ll_start] = @state[:ll_end]\n\n # We will start with the assumption that we're going to end the current logical line. We may layer\n # find out that we did not, in fact, do so.\n @state[:ll_end] = true\n\n # Reset the line continuator flag the the last line may have set to true\n @state[:line_continuator] = false\n\n # Find the first non-(space/tab) character\n index = 0\n while index < line.length && [\" \", \"\\t\"].include?(line[index])\n index += 1\n end\n @state[:indent_string] = line[0...index]\n\n # Iterate over the line's characters as long as there are any. We use different iteration\n # methods depending on whether we're inside a string or not\n index = 0\n while index < line.length\n if @state[:in_string].nil?\n index = parse_characters_normal(line, index)\n else\n index = parse_characters_in_string(line, index)\n end\n end\n\n # We have reached the end of the line. Decide whether or not the logical line ends here.\n @state[:ll_end] = @state[:in_string].nil? 
&& @state[:open_braces] == 0 && !@state[:line_continuator]\n end", "def lshift!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LSHIFT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 164:10: '<<'\n match( \"<<\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def _EmptyLine\n\n _save = self.pos\n while true # sequence\n _tmp = scan(/\\G(?-mix:^)/)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_Comment)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_EofComment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_EmptyLine unless _tmp\n return _tmp\n end", "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n @pos = pos0\n lit\n end", "def literal?(node); end", "def jump_to_line l\n l = l.clamp 0, num_lines - 1\n return if @topline == l\n @topline = l\n @botline = [l + buffer.content_height, num_lines].min\n buffer.mark_dirty!\n end", "def lbracket!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LBRACKET\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 179:12: '('\n match( 0x28 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def is_literal?\n if @operator && @operator.is_negation?\n @right_sentence.is_literal?\n else\n @left_sentence.nil? 
&& @right_sentence.nil?\n end\n end", "def t__19!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__19\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def parse_line_break; end", "def next_l\n raise \"implement in subclass\"\n end", "def is_rpar(latex, step)\n\tlatex[step+1..step+5].join == \"right)\"\nend", "def process_new_line\n @status.line += 1\n\n @skip_next = true if ((@cur_char == \"\\n\" && @next_char == \"\\r\") ||\n (@cur_char == \"\\r\" && @next_char == \"\\n\"))\n end", "def le!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 57 )\n\n type = LE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 384:6: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 57 )\n\n end", "def t__31!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 2 )\n\n\n\n type = T__31\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 8:9: '!='\n match( \"!=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 2 )\n\n\n end", "def skip_to_eoln\r\n @source.get until @source.eoln?\r\n true\r\n end", "def leq!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n\n type = LEQ\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 154:7: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end", "def fresh_line?\n @content.empty? || @content[-1].eql?(NL)\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 39 )\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 138:6: ( '\\\\r' )? '\\\\n'\n # at line 138:6: ( '\\\\r' )?\n alt_2 = 2\n look_2_0 = @input.peek( 1 )\n\n if ( look_2_0 == 0xd )\n alt_2 = 1\n end\n case alt_2\n when 1\n # at line 138:6: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 39 )\n\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? 
/^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def lte!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n\n\n type = LTE\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 33:6: '<='\n match( \"<=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n\n end", "def t__23!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__23\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 4 )\n\n\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 269:3: ';'\n match( 0x3b )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 4 )\n\n\n end", "def t__33!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 4 )\n\n\n\n type = T__33\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 10:9: '&&'\n match( \"&&\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 4 )\n\n\n end", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. 
'9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def lshift_asgn!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 46 )\n\n type = LSHIFT_ASGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 167:15: '<<='\n match( \"<<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 46 )\n\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def t__22!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n type = T__22\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 27:9: 'L'\n match( 0x4c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end", "def line(pos = T.unsafe(nil)); end", "def eol!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = EOL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 354:6: ( '\\\\r' )? 
'\\\\n'\n # at line 354:6: ( '\\\\r' )?\n alt_6 = 2\n look_6_0 = @input.peek( 1 )\n\n if ( look_6_0 == 0xd )\n alt_6 = 1\n end\n case alt_6\n when 1\n # at line 354:6: '\\\\r'\n match( 0xd )\n\n end\n match( 0xa )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def nextLine\r\n\t\twhile (@allLines[0] == \"\" || @allLines[0] == \"\\r\\n\" || @allLines[0] == \"\\n\")\r\n\t\t\[email protected]\r\n\t\tend\r\n\t\tif(@allLines[0]!=nil)\r\n\t\t\t@Line = @allLines[0]\r\n\t\t\[email protected]\r\n\t\t\tcheckSpace\r\n\t\tend\r\n\tend", "def set_line_break_positions!(lb)\n l_ch = @options[:language].chars\n\n # Don't break lines between double open quote and apostrophe (via ~)\n lb.gsub!(\n \"#{ l_ch[:d_quote_open] } #{ l_ch[:apostrophe] }\",\n \"#{ l_ch[:d_quote_open] }~#{ l_ch[:apostrophe] }\"\n )\n\n # Insert zero-width space after all elipses, emdashes, and hyphens.\n # This gives latex the option to break a line after these characters.\n # \\hspace{0pt} is the latex equivalent of zero-width space (&#x200B;)\n line_breakable_chars = Regexp.escape(\n [l_ch[:elipsis], l_ch[:em_dash], '-'].join\n )\n # Exceptions: Don't insert zero-width space if followed by no-break characters:\n no_break_following_chars = Regexp.escape(\n [\n l_ch[:s_quote_close],\n l_ch[:d_quote_close],\n ')?,!',\n \"\\u00A0\", # non-breaking space\n \"\\u202F\", # narrow non-breaking space\n ].join\n )\n # We only want to allow linebreak _after_ line_breakable_chars but not _before_.\n # We insert a \\\\nolinebreak to prevent linebreaks _before_.\n lb.gsub!(\n /\n (?<lbc> # named capture group\n [#{ line_breakable_chars }]\n )\n (?! # not followed by one of the following options\n (\n [#{ no_break_following_chars }] # certain characters\n |\n #{ options[:ed_and_trn_abbreviations] } # language specific editor or translator abbreviations\n |\n \\\\RtLastEagle # last eagle latex command\n )\n )\n /ix,\n \"\\\\nolinebreak[4]\\\\k<lbc>\\\\hspace{0pt}\"\n )\n\n # When we adjust kerning in smallcaps emulation, the previous gsub!\n # inserts a \\\\nolinebreak[4]-\\\\hspace{0pt} between the opening brace\n # and the minus sign of either of any negative kerning values.\n # This gsub! undoes it. I chose to break it into a separate gsub! call\n # in order to keep the previous regex simpler:\n # Original latex:\n # T\\RtSmCapsEmulation{-0.1em}{EXT}\n # Modified by above gsub! 
to:\n # T\\RtSmCapsEmulation{\\nolinebreak[4]-\\hspace{0pt}0.1em}{EXT}\n # Here we revert it back to:\n # T\\RtSmCapsEmulation{-0.1em}{EXT}\n lb.gsub!(\"{\\\\nolinebreak[4]-\\\\hspace{0pt}\", \"{-\")\n\n # We don't allow linebreaks _before_ or _after_ an emdash when followed\n # by some abbreviations.\n lb.gsub!(\n /\n #{ l_ch[:em_dash] }\n (\n #{ options[:ed_and_trn_abbreviations] }\n )\n /ix,\n \"\\\\nolinebreak[4]\" + l_ch[:em_dash] + \"\\\\nolinebreak[4]\" + '\\1'\n )\n\n # We don't allow linebreaks before certain numbers:\n # `word 1` => `word~1`\n # lb.gsub!(/(?<=[a-z])\\s(?=\\d)/, \"~\")\n\n # We don't allow linebreaks between period and numbers:\n # `word .22` => `word .\\\\nolinebreak[4]22`\n lb.gsub!(/( \\.)(\\d)/, '\\1' + \"\\\\nolinebreak[4]\" + '\\2')\n\n # We don't allow linebreaks between the end of a control sequence and a period\n lb.gsub!(\"}.\", \"}\\\\nolinebreak[4].\")\n\n # We don't allow linebreaks between a hyphen and an ellipsis when\n # followed by a closing quote mark\n lb.gsub!(\n \"\\\\nolinebreak[4]-\\\\hspace{0pt}…#{ l_ch[:d_quote_close] }\",\n \"\\\\nolinebreak[4]-\\\\nolinebreak[4]…#{ l_ch[:d_quote_close] }\"\n )\n\n # We don't allow linebreaks between an ellipsis and the next word\n # when it is preceded by an opening quote mark\n lb.gsub!(\n \"#{ l_ch[:d_quote_open] }\\\\nolinebreak[4]…\\\\hspace{0pt}\",\n \"#{ l_ch[:d_quote_open] }\\\\nolinebreak[4]…\\\\nolinebreak[4]\"\n )\n\n # We don't allow linebreaks between chinese period and closing bracket\n lb.gsub!(\"。]\", \"。\\\\nolinebreak[4]]\")\n\n # Convert any zero-width spaces to latex equivalent\n lb.gsub!(/\\u200B/, \"\\\\hspace{0pt}\")\n end", "def lex(input)\n line = 1\n offset = 0\n ending = input.length\n\n until offset == ending do\n next_token(input, offset, line).tap do |token|\n raise UnconsumedInputError,\n \"Unmatched input #{input[offset..-1].inspect} on line #{line}\" if token.nil?\n\n token[:offset] = offset\n line, token[:line] = token[:line], line\n offset += token[:value].length\n yield token unless token[:discarded]\n end\n end\n\n yield ({ :name => :$end, :line => line, :value => nil, :offset => offset })\n end", "def correct_for_literals(lineno, column)\n tstring_index = @lex.index do |pos, token|\n pos[0] == lineno and pos[1] == column and\n token == :on_tstring_content\n end\n\n tstring_index ? @lex[tstring_index -1][0][1] : column\n end", "def lb!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 38 )\n\n type = LB\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 363:6: '('\n match( 0x28 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 38 )\n\n end" ]
[ "0.6243398", "0.6158129", "0.60829586", "0.60829586", "0.60829586", "0.60687613", "0.58029675", "0.5778378", "0.5778378", "0.5778378", "0.5704982", "0.55836326", "0.55836326", "0.55836326", "0.5499322", "0.54896456", "0.5468292", "0.5447602", "0.5445223", "0.5434087", "0.54178596", "0.5399679", "0.5396113", "0.538282", "0.53789586", "0.53789586", "0.53789586", "0.53789586", "0.536152", "0.53563035", "0.53563035", "0.53563035", "0.5352201", "0.53053415", "0.53023076", "0.5291661", "0.5286445", "0.5286445", "0.5286445", "0.5286445", "0.52111256", "0.5209495", "0.5194373", "0.5194373", "0.5194373", "0.51941574", "0.5188557", "0.51869273", "0.515983", "0.51520866", "0.5149704", "0.5141968", "0.5141968", "0.5141968", "0.5128446", "0.5116838", "0.51138306", "0.51053196", "0.5094632", "0.50900924", "0.5088881", "0.50879216", "0.50879216", "0.50879216", "0.5085612", "0.5077175", "0.50687665", "0.50552166", "0.5043578", "0.504347", "0.5042817", "0.5038756", "0.5034994", "0.50317657", "0.5029048", "0.5012276", "0.4998127", "0.49961767", "0.49908128", "0.4988467", "0.4988333", "0.49841893", "0.49820483", "0.49815342", "0.49793047", "0.49792838", "0.49745068", "0.497225", "0.49652174", "0.4958857", "0.49494815", "0.494299", "0.493865", "0.4937353", "0.4935603", "0.49337196", "0.49315116", "0.49233967", "0.49179408", "0.49154472" ]
0.62729967
0
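The `match_lit?` entry above pairs a non-consuming lookahead (`peek_lit`) with a deferred advance (`fwd_after_peek`). Below is a minimal runnable sketch of that peek-then-advance pattern, assuming a toy token array in place of the real lexer's input-element scanner; `ToyLexer`, `WS`, `@tokens`, `@pos`, and `@peek_pos` are all illustrative stand-ins, not names from the original source.

# A toy model of the peek-then-advance pattern; not the original lexer.
class ToyLexer
  WS = [" ", "\t", "\n"].freeze  # whitespace AND line terminators are skipped

  def initialize(tokens)
    @tokens = tokens  # illustrative stand-in for the real input elements
    @pos = 0          # committed scan position
  end

  # Non-consuming: find the next non-skipped token and remember where the
  # scan ended, but leave the committed position untouched.
  def peek_lit
    i = @pos
    i += 1 while i < @tokens.size && WS.include?(@tokens[i])
    @peek_pos = i + 1
    @tokens[i]
  end

  # Commit the position reached by the last peek.
  def fwd_after_peek
    @pos = @peek_pos
  end

  # Advance only when the upcoming literal equals l; otherwise stay put.
  def match_lit?(l)
    lit = peek_lit
    if lit == l
      fwd_after_peek
      lit
    end
  end
end

lexer = ToyLexer.new(["(", " ", "\n", "x", ")"])
p lexer.match_lit?("(")  # => "(" -- matched, position advances past it
p lexer.match_lit?(")")  # => nil -- next literal is "x", position unchanged
p lexer.match_lit?("x")  # => "x"

Splitting the peek from the commit lets callers test several candidate literals without disturbing the scan position, which is why the mismatch branch simply returns nil.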
check whether the next literal is equal to _l_. white spaces are skipped and ignored; line terminators are not ignored. if the next literal is not _l_, the position is not forwarded; if it is _l_, the position is forwarded (see the sketch below).
def match_lit_nolt?(l, hint = nil)\n      lit = peek_lit_nolt(hint)\n      if lit == l\n        fwd_after_peek\n        lit\n      else\n        nil\n      end\n    end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_lit?(l, hint = nil)\n lit = peek_lit(hint)\n if lit == l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def is_lpar(latex, step)\n\tlatex[step+1..step+5].join == \"left(\"\nend", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def eql_lit?(l, hint = nil)\n lit = peek_lit(hint)\n if lit.eql? l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def eql_lit_nolt?(l, hint = nil)\n lit = peek_lit_nolt(hint)\n if lit.eql? l\n fwd_after_peek\n lit\n else\n nil\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def _linear_white_space\n _save = self.pos\n\n _save1 = self.pos\n while true # sequence\n _save2 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save2\n end\n unless _tmp\n self.pos = _save1\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save1\n end\n break\n end # end sequence\n\n if _tmp\n while true\n\n _save3 = self.pos\n while true # sequence\n _save4 = self.pos\n _tmp = apply(:_CRLF)\n unless _tmp\n _tmp = true\n self.pos = _save4\n end\n unless _tmp\n self.pos = _save3\n break\n end\n _tmp = apply(:_LWSP_char)\n unless _tmp\n self.pos = _save3\n end\n break\n end # end sequence\n\n break unless _tmp\n end\n _tmp = true\n else\n self.pos = _save\n end\n set_failed_rule :_linear_white_space unless _tmp\n return _tmp\n end", "def nl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 51 )\n\n\n\n type = NL\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 230:5: ( '\\\\n' )+\n # at file 230:5: ( '\\\\n' )+\n match_count_8 = 0\n while true\n alt_8 = 2\n look_8_0 = @input.peek( 1 )\n\n if ( look_8_0 == 0xa )\n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 230:5: '\\\\n'\n match( 0xa )\n\n else\n match_count_8 > 0 and break\n eee = EarlyExit(8)\n\n\n raise eee\n end\n match_count_8 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 51 )\n\n\n end", "def advance\n r = yylex\n self.token = r\n\n raise \"yylex returned nil\" unless r\n\n return RubyLexer::EOF != r\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? 
|| @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? || !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def _Le\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_Eof)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_Le unless _tmp\n return _tmp\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. 
'7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def with_lineno?(node); end", "def printl!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 14 )\n\n type = PRINTL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 316:10: 'printl'\n match( \"printl\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 14 )\n\n end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def lex_en_line_comment; end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # 
trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def parse_nl\n s0 = @scanner.pos\n s2 = parse_newline\n if s2 == :failed\n s1 = :failed\n else\n s1 = []\n while s2 != :failed\n s1 << s2\n s2 = parse_newline\n end\n end\n if s1 == :failed\n @scanner.pos = s0\n :failed\n else\n s2 = []\n s3 = parse_skipline\n while s3 != :failed\n s2 << s3\n s3 = parse_skipline\n end\n [s1, s2]\n end\n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lmatch(a, lchar, rchar)\n\n token = []\n c = a.first\n token << c until (c = a.shift; c == lchar or c == rchar or a.empty?)\n token << c\n\n if c == lchar then\n found, tokenx, remainderx = rmatch(a, lchar, rchar)\n c = found\n token << tokenx\n remainder = remainderx\n else\n remainder = a.join\n end\n\n [c, token.join, remainder]\n end", "def t__18!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n type = T__18\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 27:9: 'L'\n match( 0x4c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end", "def handled_labeled_list(line, level, margin, offset, prefix)\n prefix_length = prefix.length\n text = line.text\n flag = nil\n\n case prefix\n when /^\\[/ then\n flag = :LABELED\n prefix = prefix[1, prefix.length-2]\n when /:$/ then\n flag = :NOTE\n prefix.chop!\n else\n raise \"Invalid List Type: #{self.inspect}\"\n end\n\n # body is on the next line\n if text.length <= offset then\n original_line = line\n line = @lines.next\n return false unless line\n text = line.text\n\n for i in 0..margin\n if text[i] != SPACE\n @lines.unget\n return false\n end\n end\n\n i = margin\n i += 1 while text[i] == SPACE\n\n if i >= text.length then\n @lines.unget\n return false\n else\n offset = i\n prefix_length = 0\n\n if text[offset..-1] =~ SIMPLE_LIST_RE then\n @lines.unget\n line = original_line\n line.text = ''\n else\n @lines.delete original_line\n end\n end\n end\n\n line.stamp :LIST, level+1, prefix, flag\n text[margin, prefix_length] = \" \" * prefix_length\n assign_types_to_lines(offset, level + 1)\n return true\n end", "def parse_line(line)\n # If the previous line didn't and a logical line, we're not going to start one. If it did,\n # we're indeed going to start a new logical line\n @state[:ll_start] = @state[:ll_end]\n\n # We will start with the assumption that we're going to end the current logical line. We may layer\n # find out that we did not, in fact, do so.\n @state[:ll_end] = true\n\n # Reset the line continuator flag the the last line may have set to true\n @state[:line_continuator] = false\n\n # Find the first non-(space/tab) character\n index = 0\n while index < line.length && [\" \", \"\\t\"].include?(line[index])\n index += 1\n end\n @state[:indent_string] = line[0...index]\n\n # Iterate over the line's characters as long as there are any. We use different iteration\n # methods depending on whether we're inside a string or not\n index = 0\n while index < line.length\n if @state[:in_string].nil?\n index = parse_characters_normal(line, index)\n else\n index = parse_characters_in_string(line, index)\n end\n end\n\n # We have reached the end of the line. Decide whether or not the logical line ends here.\n @state[:ll_end] = @state[:in_string].nil? 
&& @state[:open_braces] == 0 && !@state[:line_continuator]\n end", "def eat_eol()\n if eol_as_token # if eol is significant in the language...\n position = build_position(:lexeme)\n eol_lexeme = scanner.scan(eol_pattern) # Consume the eol text\n eol_token = [:T_EOL, RaccLexer::Token.new(eol_lexeme, eol_lexeme, position)]\n queue.unshift eol_token\n else\n scanner.scan(eol_pattern) # Consume the eol text\n end\n\n @lineno += 1\n @line_offset = scanner.pos()\n end", "def lpar!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 5 )\n\n\n\n type = LPAR\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 273:4: '('\n match( 0x28 )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 5 )\n\n\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 32:5: '<'\n match( 0x3c )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 27 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 161:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 27 )\n\n end", "def inc_l\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . )* '>>'\n match(\"<<\")\n # at line 500:9: ( . 
)*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def literal?(node); end", "def jump_to_line l\n l = l.clamp 0, num_lines - 1\n return if @topline == l\n @topline = l\n @botline = [l + buffer.content_height, num_lines].min\n buffer.mark_dirty!\n end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_line_comment=(_arg0); end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def llader!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 8 )\n\n\n\n type = LLADER\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 285:4: '}'\n match( 0x7d )\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 8 )\n\n\n end", "def _EmptyLine\n\n _save = self.pos\n while true # sequence\n _tmp = scan(/\\G(?-mix:^)/)\n unless _tmp\n self.pos = _save\n break\n end\n _tmp = apply(:__hyphen_)\n unless _tmp\n self.pos = _save\n break\n end\n\n _save1 = self.pos\n while true # choice\n _tmp = apply(:_Nl)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_Comment)\n break if _tmp\n self.pos = _save1\n _tmp = apply(:_EofComment)\n break if _tmp\n self.pos = _save1\n break\n end # end choice\n\n unless _tmp\n self.pos = _save\n end\n break\n end # end sequence\n\n set_failed_rule :_EmptyLine unless _tmp\n return _tmp\n end", "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? 
or lit.lt?)\n end\n @pos = pos0\n lit\n end", "def lt!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 55 )\n\n type = LT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 382:6: '<'\n match( 0x3c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 55 )\n\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def _rl_erase_at_end_of_line(l)\r\n _rl_backspace(l)\r\n @rl_outstream.write(' '*l)\r\n _rl_backspace(l)\r\n @_rl_last_c_pos -= l\r\n @visible_line[@_rl_last_c_pos,l] = 0.chr * l\r\n @rl_display_fixed = true if !@rl_display_fixed\r\n end", "def is_rpar(latex, step)\n\tlatex[step+1..step+5].join == \"right)\"\nend", "def lshift!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LSHIFT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 164:10: '<<'\n match( \"<<\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def is_literal?\n if @operator && @operator.is_negation?\n @right_sentence.is_literal?\n else\n @left_sentence.nil? && @right_sentence.nil?\n end\n end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def fresh_line?\n @content.empty? || @content[-1].eql?(NL)\n end", "def leq!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n\n type = LEQ\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 154:7: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? 
&& new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def t__19!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__19\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def t__31!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 2 )\n\n\n\n type = T__31\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 8:9: '!='\n match( \"!=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 2 )\n\n\n end", "def process_new_line\n @status.line += 1\n\n @skip_next = true if ((@cur_char == \"\\n\" && @next_char == \"\\r\") ||\n (@cur_char == \"\\r\" && @next_char == \"\\n\"))\n end", "def t__33!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 4 )\n\n\n\n type = T__33\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 10:9: '&&'\n match( \"&&\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 4 )\n\n\n end", "def nextLine\r\n\t\twhile (@allLines[0] == \"\" || @allLines[0] == \"\\r\\n\" || @allLines[0] == \"\\n\")\r\n\t\t\[email protected]\r\n\t\tend\r\n\t\tif(@allLines[0]!=nil)\r\n\t\t\t@Line = @allLines[0]\r\n\t\t\[email protected]\r\n\t\t\tcheckSpace\r\n\t\tend\r\n\tend", "def le!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 57 )\n\n type = LE\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 384:6: '<='\n match( \"<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 57 )\n\n end", "def lbracket!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LBRACKET\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 179:12: '('\n match( 0x28 )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def parse_line_break; end", "def correct_for_literals(lineno, column)\n tstring_index = @lex.index do |pos, token|\n pos[0] == lineno and pos[1] == column and\n token == :on_tstring_content\n end\n\n tstring_index ? 
@lex[tstring_index -1][0][1] : column\n end", "def t__23!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n type = T__23\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 28:9: 'l'\n match( 0x6c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n end", "def next_l\n raise \"implement in subclass\"\n end", "def skip_to_eoln\r\n @source.get until @source.eoln?\r\n true\r\n end", "def decimal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 69 )\n\n\n\n type = DecimalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n # at line 525:18: ( '0' | '1' .. '9' ( '0' .. '9' )* )\n alt_21 = 2\n look_21_0 = @input.peek( 1 )\n\n if ( look_21_0 == 0x30 )\n alt_21 = 1\n elsif ( look_21_0.between?( 0x31, 0x39 ) )\n alt_21 = 2\n else\n raise NoViableAlternative( \"\", 21, 0 )\n\n end\n case alt_21\n when 1\n # at line 525:19: '0'\n match( 0x30 )\n\n when 2\n # at line 525:25: '1' .. '9' ( '0' .. '9' )*\n match_range( 0x31, 0x39 )\n # at line 525:34: ( '0' .. '9' )*\n while true # decision 20\n alt_20 = 2\n look_20_0 = @input.peek( 1 )\n\n if ( look_20_0.between?( 0x30, 0x39 ) )\n alt_20 = 1\n\n end\n case alt_20\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x39 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n break # out of loop for decision 20\n end\n end # loop for decision 20\n\n\n end\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 69 )\n\n\n end", "def lte!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 12 )\n\n\n\n type = LTE\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 33:6: '<='\n match( \"<=\" )\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 12 )\n\n\n end", "def t__22!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 11 )\n\n type = T__22\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 27:9: 'L'\n match( 0x4c )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 11 )\n\n end", "def line(pos = T.unsafe(nil)); end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def same_line_bracket_block?(result, iter, call_sexp)\n call_sexp.shift # discard the sexp_type, as the processor would\n syntactic = !Call.new(processor).arguments?(call_sexp) || iter.end_with?(\")\")\n stylistic = result !~ /\\n/ && result.size < LINE_LENGTH\n syntactic 
&& stylistic\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? /^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def sl_comment!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 39)\n\n type = SL_COMMENT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 470:5: '//' ( ' $ANTLR ' SRC | (~ ( '\\\\r' | '\\\\n' ) )* ) ( '\\\\r' )? '\\\\n'\n match(\"//\")\n # at line 471:5: ( ' $ANTLR ' SRC | (~ ( '\\\\r' | '\\\\n' ) )* )\n alt_2 = 2\n alt_2 = @dfa2.predict(@input)\n case alt_2\n when 1\n # at line 471:7: ' $ANTLR ' SRC\n match(\" $ANTLR \")\n src!\n\n when 2\n # at line 472:6: (~ ( '\\\\r' | '\\\\n' ) )*\n # at line 472:6: (~ ( '\\\\r' | '\\\\n' ) )*\n loop do #loop 1\n alt_1 = 2\n look_1_0 = @input.peek(1)\n\n if (look_1_0.between?(0x0000, ?\\t) || look_1_0.between?(0x000B, ?\\f) || look_1_0.between?(0x000E, 0xFFFF)) \n alt_1 = 1\n\n end\n case alt_1\n when 1\n # at line 472:6: ~ ( '\\\\r' | '\\\\n' )\n if @input.peek(1).between?(0x0000, ?\\t) || @input.peek(1).between?(0x000B, ?\\f) || @input.peek(1).between?(0x000E, 0x00FF)\n @input.consume\n else\n mse = MismatchedSet(nil)\n recover(mse)\n raise mse\n end\n\n\n\n else\n break #loop 1\n end\n end\n\n end\n # at line 474:3: ( '\\\\r' )?\n alt_3 = 2\n look_3_0 = @input.peek(1)\n\n if (look_3_0 == ?\\r) \n alt_3 = 1\n end\n case alt_3\n when 1\n # at line 474:3: '\\\\r'\n match(?\\r)\n\n end\n match(?\\n)\n # --> action\n channel=HIDDEN;\n # <-- action\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 39)\n\n end", "def new_line?\n ary = insertion_point\n ary.empty? || ary.all? {|stmt| stmt.type == :code }\n end", "def lex(input)\n line = 1\n offset = 0\n ending = input.length\n\n until offset == ending do\n next_token(input, offset, line).tap do |token|\n raise UnconsumedInputError,\n \"Unmatched input #{input[offset..-1].inspect} on line #{line}\" if token.nil?\n\n token[:offset] = offset\n line, token[:line] = token[:line], line\n offset += token[:value].length\n yield token unless token[:discarded]\n end\n end\n\n yield ({ :name => :$end, :line => line, :value => nil, :offset => offset })\n end", "def lshift_asgn!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 46 )\n\n type = LSHIFT_ASGN\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 167:15: '<<='\n match( \"<<=\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 46 )\n\n end", "def opening_brace_on_same_line?(node); end", "def lex\n return enum_for(__method__) unless block_given?\n @src.each_line do |line|\n yield match(line)\n @lineno += 1\n end\n eof = Line.new(\"\", @lineno, :eof, \"\", \"\", \"\")\n loop { yield eof }\n end", "def lex_en_expr_mid; end" ]
[ "0.63618225", "0.6109549", "0.608009", "0.6075154", "0.6075154", "0.6075154", "0.59064585", "0.58042985", "0.5745842", "0.5745842", "0.5745842", "0.55970234", "0.55970234", "0.55970234", "0.55190486", "0.5509483", "0.5465702", "0.54279286", "0.5418955", "0.5417631", "0.5411698", "0.54096174", "0.5408331", "0.5406038", "0.5395812", "0.535679", "0.535679", "0.535679", "0.5347634", "0.53355306", "0.53355306", "0.53355306", "0.53355306", "0.5307638", "0.5298745", "0.5247298", "0.5247298", "0.5247298", "0.5247298", "0.5216821", "0.5197778", "0.519618", "0.5194669", "0.51784277", "0.5166941", "0.5160377", "0.5160167", "0.5136275", "0.5133279", "0.5133279", "0.5133279", "0.5123261", "0.51185864", "0.5115948", "0.51090384", "0.51090384", "0.51090384", "0.510635", "0.510635", "0.510635", "0.51050305", "0.51022935", "0.50944245", "0.50888026", "0.5070169", "0.50677717", "0.5062601", "0.5054132", "0.5048757", "0.50396824", "0.5036199", "0.50297564", "0.5015323", "0.5014224", "0.5009569", "0.50084907", "0.5003186", "0.4995321", "0.49932587", "0.49931288", "0.49883398", "0.49796256", "0.49778587", "0.4976656", "0.49755856", "0.49704847", "0.49704796", "0.4954523", "0.49538583", "0.4945663", "0.4939036", "0.493862", "0.4938523", "0.4935916", "0.49320945", "0.4930506", "0.492864", "0.49276704", "0.49230444", "0.4921292" ]
0.6321066
1
fetch next literal. position is not forwarded. white spaces and line terminators are skipped and ignored.
def peek_lit(hint) pos0 = @pos while lit = next_input_element(hint) and (lit.ws? or lit.lt?) end @pos = pos0 lit end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n 
statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = 
@adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? 
statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def next_token; end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n return Token.new [x, x,position]\n end\n end", "def literal(buffer)\n reader = lambda { |string = ''|\n buffer.major_mode.read(1) do |event|\n if unicode = 
event.unicode\n string += unicode # copy\n buffer.message string.inspect\n\n case result = literal_handle(buffer, string)\n when nil\n reader.call(string)\n when String\n literal_insert(buffer, result)\n end\n else\n return # Unverrichteter Dinge\n end\n end\n }\n\n reader.call\n end", "def next\n ret = peek_next\n @str.slice! @last_re if ret.type != :eos\n\n ret\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! 
no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def next\n token = next_token\n token = next_token while token&.empty?\n token\n end", "def literal; end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? || token.kind_of?(String) \n @pointer += 1\n token\n end", "def next\n\t\tif @next_token\n\t\t\ttoken = @next_token\n\t\t\t@next_token = nil\n\t\t\treturn token\n\t\telse\n\t\t\ttoken = read_token\n\t\t\treturn token\n\t\tend\n\tend", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def next_token\n tokens.shift\n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? 
|| @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? || !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. '7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def next!() end", "def next_token\n @tokens.shift\n end", "def push_literal\n <<-CODE\n next_int;\n t1 = cpu_current_literals(state, c);\n t2 = fast_fetch(t1, _int);\n stack_push(t2);\n CODE\n end", "def next()\n return \" \" unless has_next()\n if(@count <= 0)\n @char = @compressed_string[@i]\n @i += 1\n @count = get_count()\n end\n @count -= 1\n return @char\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? 
/^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def next_token; @stack.shift; end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 42)\n\n type = STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 486:4: '\\\\'' LITERAL_CHAR ( LITERAL_CHAR )* '\\\\''\n match(?\\')\n literal_char!\n # at line 486:22: ( LITERAL_CHAR )*\n loop do #loop 5\n alt_5 = 2\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, 0xFFFF)) \n alt_5 = 1\n\n end\n case alt_5\n when 1\n # at line 486:22: LITERAL_CHAR\n literal_char!\n\n else\n break #loop 5\n end\n end\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 42)\n\n end", "def next_token\n \n # Early return if there is nothing to be read. This means we've reached the end of the file.\n \n unless @file[@pos]\n return nil\n end\n \n # This is the token that will be returned.\n token = Compiler::Token.new\n \n # Initializes a new instance of the automaton.\n automaton = Automaton.new\n \n # Will be set inside the loop, if necessary.\n increment_next = false\n \n # Will be set inside the loop. 
Marks whether we've reached the end of the file.\n eof = false\n \n # Build a new token while we don't have a new word yet and isn't in the failed state\n while ((automaton.state != :A || automaton.word.empty?) && automaton.state != :failed)\n \n # The next input for the automaton\n char = @file[@pos]\n \n if char\n \n # Moves the pointer to the next char\n @pos += 1\n \n automaton.transition(char)\n \n # While the automaton hasn't started to build a new word yet, increments the line and column numbers.\n # In this phase, we're just skipping blank characters\n if automaton.word.empty?\n if increment_next\n if char == \"\\n\"\n increment_next = true\n else\n increment_next = false\n end\n @line += 1\n @column = 0\n elsif char == \"\\n\"\n @column += 1\n increment_next = true\n else\n @column += 1\n end\n end\n \n else\n eof = true\n puts \"breaking\"\n break\n end\n end\n \n \n \n if eof\n automaton.transition(\"\\n\")\n else\n @pos -= 1\n end\n \n if (automaton.type == :identifier) && (Compiler.reserved_words.is_reserved?(automaton.word))\n token.type = :reserved_word\n else\n token.type = automaton.type\n end\n \n token.value = automaton.word\n token.line = @line\n token.column = @column\n \n return token\n \n end", "def racc_read_token(t, tok, val); end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def process_lit(exp)\n # TODO: audit against obfuscator\n value = exp.shift\n case value\n when Integer then\n return \"LONG2NUM(#{value})\"\n when Float then\n return \"rb_float_new(#{value})\"\n when Symbol\n return \"ID2SYM(rb_intern(#{value.to_s.inspect}))\"\n when Range\n f = process_lit [ value.first ]\n l = process_lit [ value.last ]\n x = 0\n x = 
1 if value.exclude_end?\n\n return \"rb_range_new(#{f}, #{l}, #{x})\"\n when Regexp\n src = value.source\n return \"rb_reg_new(#{src.inspect}, #{src.size}, #{value.options})\"\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n return nil\n end", "def next_token\n\t\[email protected]_token\n\tend", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def next() end", "def next() end", "def peek\n @tokens[@position]\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def get_token\n @tokenbuf << read_token if @tokenbuf.length == 0\n return @tokenbuf.shift\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when text = ss.scan(/#{DIGIT}/) then\n action { [:DIGIT, text.to_i] }\n when text = ss.scan(/#{ADDITION}/) then\n action { [:ADDITION, text] }\n when text = ss.scan(/#{SUBSTRACTION}/) then\n action { [:SUBSTRACTION, text] }\n when text = ss.scan(/#{MULTIPLICATION}/) then\n action { [:MULTIPLICATION, text] }\n when text = ss.scan(/#{DIVISION}/) then\n action { [:DIVISION, text] }\n when text = ss.scan(/#{OPENING_PARANTHESIS}/) then\n action { [:OPENING_PARANTHESIS, text] }\n when text = ss.scan(/#{CLOSING_PARANTHESIS}/) then\n action { [:CLOSING_PARANTHESIS, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n displacement = @file.gets.try(:chomp).try(:to_f)\n return nil unless displacement\n\n ret = @curr_val\n @curr_val += displacement\n ret\n end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def next_word\n return unless md = get.match(FORWARD_WORD, cursor)\n self.cursor = md.offset(0).last\n end", "def match(ptr, depth = 0)\n case c = ptr.peek(1)\n when '\"', '`'\n start_pos = ptr.pos\n ptr.pos += 1\n AST.new(:string, value: ptr.scan_until(/#{c}/).chop,\n attributes: { type: char_to_type(c) },\n pos: start_pos)\n end\n end", "def consume!\n empty_line ||\n name_token ||\n comment_token ||\n whitespace_token ||\n line_token ||\n heredoc_token ||\n string_token ||\n number_token ||\n regex_token ||\n literal_token\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? 
&& new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def set_literal\n <<-CODE\n next_int;\n tuple_put(state, cpu_current_literals(state, c), _int, stack_top());\n CODE\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def next()\n @index += 1\n @string[@index...(@index+1)]\n end", "def read_character\n lit = read_literal\n\n return \" \" if lit.empty? && peek_char == \" \"\n CHARACTERS.fetch(lit.downcase) do\n # Return just the first character\n unread(lit[1..-1])\n lit[0,1]\n end\n end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def next_token\n @current_token = @lexer.next_token\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.check(/\\n/) then\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil, :option, :inner, :start, :macro, :rule, :group then\n case\n when ss.skip(/options?.*/) then\n [:state, :option]\n when ss.skip(/inner.*/) then\n [:state, :inner]\n when ss.skip(/macros?.*/) then\n [:state, :macro]\n when ss.skip(/rules?.*/) then\n [:state, :rule]\n when ss.skip(/start.*/) then\n [:state, :start]\n when ss.skip(/end/) then\n [:state, :END]\n when ss.skip(/\\A((?:.|\\n)*)class ([\\w:]+.*)/) then\n action { [:class, *matches] }\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/\\s*(\\#.*)/) then\n action { [:comment, text] }\n when (state == :option) && (ss.skip(/\\s+/)) then\n # do nothing\n when (state == :option) && (text = ss.scan(/stub/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/debug/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/do_parse/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/lineno/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/column/i)) then\n action { [:option, text] }\n when (state == :inner) && (text = ss.scan(/.*/)) then\n action { [:inner, text] }\n when (state == :start) && (text = ss.scan(/.*/)) then\n action { [:start, text] }\n when (state == :macro) && (ss.skip(/\\s+(\\w+)\\s+#{RE}/o)) then\n action { [:macro, *matches] }\n when (state == :rule) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:rule, *matches] }\n when (state == :rule) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*\\|\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:group, *matches] }\n 
when (state == :group) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:groupend, *matches] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :END then\n case\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/.*/) then\n action { [:end, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def peek\n @tokens[@pos]\n end", "def next()\n if has_next()\n @strings[0][1]-=1\n c = @strings[0][0]\n while has_next() and @strings[0][1] == 0\n @strings.shift\n end\n return c\n end\n return \" \"\n end", "def next_cursor\n @result[:next_cursor]\n end", "def get\n @source_index += 1\n\n # Maintain line count.\n prev_char = @source_text[@source_index - 1]\n if @source_index.positive? && prev_char == \"\\n\"\n @line_index += 1\n @col_index = -1\n end\n\n @col_index += 1\n char = if @source_index > @last_index\n # Read past the end of source text.\n END_MARK\n else\n @source_text[@source_index]\n end\n Character.new(char, @line_index, @col_index, @source_index, @source_text)\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def get_next\n return if eof?\n\n @buffer << @io.gets if @buffer.empty?\n\n until @io.eof?\n line = @io.gets\n next unless line\n\n if @parser.start_new?(line) || @buffer.empty?\n @buffer << line\n break\n else\n @buffer.last << line\n end\n end\n\n return if @buffer.empty?\n @parser.parse(@buffer.slice!(0)) || self.get_next\n end", "def run(source, until_token = :invalid, token_count = nil)\n @at_end = false\n @source = source\n @reader = source.each_char\n\n read_next()\n\n while token_count == nil || token_count > 0\n skip_whitespace()\n current = @marker.character\n break unless current\n\n token = Token.new\n token.kind = :invalid\n token.from = @marker.source_index\n token.position = @marker.position.dup\n\n case current\n when ?\", ?'\n read_string(token)\n\n when ?0\n case peek_next()\n when ?x, ?X, ?b, ?B then read_base_number(token)\n else read_number(token)\n end\n\n when ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_number(token)\n\n # dot, double dot, triple dot, and floats beginning with a dot\n when ?.\n token.kind = :dot\n case peek_next()\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9 then read_number(token)\n when ?.\n read_next()\n token.kind = :double_dot\n\n if peek_next() == ?.\n read_next()\n token.kind = :triple_dot\n end\n\n token.value = Token::DESCRIPTORS[token.kind]\n else\n token.value = Token::DESCRIPTORS[token.kind]\n end\n\n when ?_, ?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j, ?k, ?l, ?m, ?n, ?o, ?p,\n ?q, ?r, ?s, ?t, ?u, ?v, ?w, ?x, ?y, ?z, ?A, ?B, ?C, ?D, ?E, ?F, ?G, ?H,\n ?I, ?J, ?K, ?L, ?M, ?N, ?O, ?P, ?Q, ?R, ?S, ?T, ?U, ?V, ?W, 
?X, ?Y, ?Z\n read_word(token)\n\n when ?\\n\n token.value = current\n token.kind = :newline\n\n when ??, ?#, ?@, ?$, ?%, ?(, ?), ?[, ?], ?{, ?}, ?^, ?~, ?`, ?\\\\, ?,, ?;\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?=, ?|, ?&, ?:, ?+, ?*\n current << read_next() if peek_next() == current\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?!\n current << read_next() if peek_next() == ?=\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?>, ?<\n case peek_next()\n when ?=, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?-\n case peek_next()\n when ?>, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?/\n case peek_next()\n when ?/ then read_line_comment(token)\n when ?* then read_block_comment(token)\n else\n token.value = Token::DESCRIPTORS[token.kind = :slash]\n read_next()\n end\n\n end # case current\n\n token.to = @marker.source_index\n last_kind = token.kind\n if !(@skip_comments && token.comment?) && !(@skip_newlines && token.newline?)\n if last_kind != :invalid\n @tokens << token\n yield token if block_given?\n else\n raise RuntimeError, \"#{token.position} Invalid token: #{token.inspect}\"\n end\n end\n\n break if until_token == last_kind\n\n read_next()\n token_count -= 1 unless token_count.nil?\n end # while current && token_count > 0\n\n @source = nil\n @reader = nil\n\n self\n end", "def next_char\n self.cursor += 1\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def peek\n @tok ||= read_token\n end", "def next(pointer); end", "def next(pointer); end", "def string_literal\n # StringLiteral ::\n # \" DoubleStringCharactersopt \"\n # ' SingleStringCharactersopt '\n #\n # DoubleStringCharacters ::\n # DoubleStringCharacter DoubleStringCharactersopt\n #\n # SingleStringCharacters ::\n # SingleStringCharacter SingleStringCharactersopt\n #\n # DoubleStringCharacter ::\n # SourceCharacter but not one of \" or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n # SingleStringCharacter ::\n # SourceCharacter but not one of ' or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n if (code = @codes[@pos]) == 0x27 #'\n term = 0x27\n elsif code == 0x22 #\"\n term = 0x22\n else\n return nil\n end\n @pos += 1\n pos0 = @pos\n\n str = []\n while (code = @codes[@pos])\n if code.nil?\n raise ParseError.new(\"no `#{term}' at end of string\", self)\n elsif line_terminator?(code)\n raise 
ParseError.new(\"string has line terminator in body\", self)\n elsif code == 0x5c #\\\n @pos += 1\n str.push(escape_sequence)\n elsif code == term\n @pos += 1\n return ECMA262::ECMA262String.new(str.compact.pack(\"U*\"))\n else\n @pos += 1\n str.push(code)\n end\n end\n nil\n end", "def get_char\n @look = @expression[@number]\n @number +=1\nend", "def next_token\n return [false, false] if @src.eos?\n# p @src.rest if @yydebug\n if ret = @src.scan(EM_OPEN_RE)\n @pre << ret\n [:EM_OPEN, ret]\n elsif ret = @src.scan(EM_CLOSE_RE)\n @pre << ret\n [:EM_CLOSE, ret]\n elsif ret = @src.scan(CODE_OPEN_RE)\n @pre << ret\n [:CODE_OPEN, ret]\n elsif ret = @src.scan(CODE_CLOSE_RE)\n @pre << ret\n [:CODE_CLOSE, ret]\n elsif ret = @src.scan(VAR_OPEN_RE)\n @pre << ret\n [:VAR_OPEN, ret]\n elsif ret = @src.scan(VAR_CLOSE_RE)\n @pre << ret\n [:VAR_CLOSE, ret]\n elsif ret = @src.scan(KBD_OPEN_RE)\n @pre << ret\n [:KBD_OPEN, ret]\n elsif ret = @src.scan(KBD_CLOSE_RE)\n @pre << ret\n [:KBD_CLOSE, ret]\n elsif ret = @src.scan(INDEX_OPEN_RE)\n @pre << ret\n [:INDEX_OPEN, ret]\n elsif ret = @src.scan(INDEX_CLOSE_RE)\n @pre << ret\n [:INDEX_CLOSE, ret]\n elsif ret = @src.scan(REF_OPEN_RE)\n @pre << ret\n [:REF_OPEN, ret]\n elsif ret = @src.scan(REF_CLOSE_RE)\n @pre << ret\n [:REF_CLOSE, ret]\n elsif ret = @src.scan(FOOTNOTE_OPEN_RE)\n @pre << ret\n [:FOOTNOTE_OPEN, ret]\n elsif ret = @src.scan(FOOTNOTE_CLOSE_RE)\n @pre << ret\n [:FOOTNOTE_CLOSE, ret]\n elsif ret = @src.scan(VERB_OPEN_RE)\n @pre << ret\n [:VERB_OPEN, ret]\n elsif ret = @src.scan(VERB_CLOSE_RE)\n @pre << ret\n [:VERB_CLOSE, ret]\n elsif ret = @src.scan(BAR_RE)\n @pre << ret\n [:BAR, ret]\n elsif ret = @src.scan(QUOTE_RE)\n @pre << ret\n [:QUOTE, ret]\n elsif ret = @src.scan(SLASH_RE)\n @pre << ret\n [:SLASH, ret]\n elsif ret = @src.scan(BACK_SLASH_RE)\n @pre << ret\n [:BACK_SLASH, ret]\n elsif ret = @src.scan(URL_RE)\n @pre << ret\n [:URL, ret]\n elsif ret = @src.scan(OTHER_RE)\n @pre << ret\n [:OTHER, ret]\n else\n ret = @src.rest\n @pre << ret\n @src.terminate\n [:OTHER, ret]\n end\nend" ]
[ "0.6661161", "0.65402114", "0.6508427", "0.6461592", "0.631385", "0.63018566", "0.6278044", "0.6261783", "0.6228459", "0.62183183", "0.6217015", "0.62010616", "0.6128506", "0.6119794", "0.6070626", "0.607005", "0.607005", "0.607005", "0.6038593", "0.6032305", "0.6027879", "0.59773856", "0.59692675", "0.5951694", "0.5948259", "0.5930848", "0.5920658", "0.59163344", "0.5915523", "0.5907998", "0.5844701", "0.5844701", "0.5844701", "0.58339727", "0.57966095", "0.57921135", "0.5773786", "0.5770626", "0.5766907", "0.5749456", "0.5747575", "0.5746652", "0.5715062", "0.57071006", "0.5694852", "0.5689935", "0.56792706", "0.5678813", "0.5655769", "0.5625529", "0.5625529", "0.56229424", "0.5616746", "0.5616045", "0.5604671", "0.55973804", "0.55918646", "0.55918646", "0.55918646", "0.5588597", "0.5588597", "0.5588597", "0.5588597", "0.5580931", "0.5580931", "0.5580931", "0.5580931", "0.5577092", "0.5576471", "0.55700105", "0.556012", "0.55587065", "0.55476016", "0.55464345", "0.55464345", "0.55464345", "0.55410796", "0.55393744", "0.55302", "0.55302", "0.55302", "0.5516685", "0.5513528", "0.55126715", "0.5512632", "0.5508489", "0.55067843", "0.55010664", "0.55010664", "0.55010664", "0.5496119", "0.54935473", "0.5492439", "0.54918116", "0.54769284", "0.5476528", "0.54749", "0.54749", "0.5470606", "0.54585963", "0.54585856" ]
0.0
-1
fetch next literal. position is not forwarded. white spaces are skipped and ignored. line terminators are not ignored.
def peek_lit_nolt(hint) pos0 = @pos while lit = next_input_element(hint) and lit.ws? end @pos = pos0 lit end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n 
statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = 
@adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? 
statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def literal(buffer)\n reader = lambda { |string = ''|\n buffer.major_mode.read(1) do |event|\n if unicode = event.unicode\n string += unicode # copy\n buffer.message string.inspect\n\n case result = literal_handle(buffer, string)\n when nil\n reader.call(string)\n when String\n literal_insert(buffer, result)\n end\n else\n return # Unverrichteter Dinge\n end\n end\n }\n\n reader.call\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n 
return Token.new [x, x,position]\n end\n end", "def next_token; end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n ret = peek_next\n @str.slice! @last_re if ret.type != :eos\n\n ret\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? 
send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? || @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? || !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! 
no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def literal; end", "def next\n token = next_token\n token = next_token while token&.empty?\n token\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. '7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def next\n\t\tif @next_token\n\t\t\ttoken = @next_token\n\t\t\t@next_token = nil\n\t\t\treturn token\n\t\telse\n\t\t\ttoken = read_token\n\t\t\treturn token\n\t\tend\n\tend", "def next_token\n tokens.shift\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? 
|| token.kind_of?(String) \n @pointer += 1\n token\n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def next!() end", "def push_literal\n <<-CODE\n next_int;\n t1 = cpu_current_literals(state, c);\n t2 = fast_fetch(t1, _int);\n stack_push(t2);\n CODE\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? /^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def next_token\n \n # Early return if there is nothing to be read. This means we've reached the end of the file.\n \n unless @file[@pos]\n return nil\n end\n \n # This is the token that will be returned.\n token = Compiler::Token.new\n \n # Initializes a new instance of the automaton.\n automaton = Automaton.new\n \n # Will be set inside the loop, if necessary.\n increment_next = false\n \n # Will be set inside the loop. Marks whether we've reached the end of the file.\n eof = false\n \n # Build a new token while we don't have a new word yet and isn't in the failed state\n while ((automaton.state != :A || automaton.word.empty?) && automaton.state != :failed)\n \n # The next input for the automaton\n char = @file[@pos]\n \n if char\n \n # Moves the pointer to the next char\n @pos += 1\n \n automaton.transition(char)\n \n # While the automaton hasn't started to build a new word yet, increments the line and column numbers.\n # In this phase, we're just skipping blank characters\n if automaton.word.empty?\n if increment_next\n if char == \"\\n\"\n increment_next = true\n else\n increment_next = false\n end\n @line += 1\n @column = 0\n elsif char == \"\\n\"\n @column += 1\n increment_next = true\n else\n @column += 1\n end\n end\n \n else\n eof = true\n puts \"breaking\"\n break\n end\n end\n \n \n \n if eof\n automaton.transition(\"\\n\")\n else\n @pos -= 1\n end\n \n if (automaton.type == :identifier) && (Compiler.reserved_words.is_reserved?(automaton.word))\n token.type = :reserved_word\n else\n token.type = automaton.type\n end\n \n token.value = automaton.word\n token.line = @line\n token.column = @column\n \n return token\n \n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 
1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 42)\n\n type = STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 486:4: '\\\\'' LITERAL_CHAR ( LITERAL_CHAR )* '\\\\''\n match(?\\')\n literal_char!\n # at line 486:22: ( LITERAL_CHAR )*\n loop do #loop 5\n alt_5 = 2\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, 0xFFFF)) \n alt_5 = 1\n\n end\n case alt_5\n when 1\n # at line 486:22: LITERAL_CHAR\n literal_char!\n\n else\n break #loop 5\n end\n end\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 42)\n\n end", "def next_token\n @tokens.shift\n end", "def next()\n return \" \" unless has_next()\n if(@count <= 0)\n @char = @compressed_string[@i]\n @i += 1\n @count = get_count()\n end\n @count -= 1\n return @char\n end", "def racc_read_token(t, tok, val); end", "def next_token; @stack.shift; end", "def process_lit(exp)\n # TODO: audit against obfuscator\n value = exp.shift\n case value\n when Integer then\n return \"LONG2NUM(#{value})\"\n when Float then\n return \"rb_float_new(#{value})\"\n when Symbol\n return \"ID2SYM(rb_intern(#{value.to_s.inspect}))\"\n when Range\n f = process_lit [ value.first ]\n l = process_lit [ value.last ]\n x = 0\n x = 1 if value.exclude_end?\n\n return \"rb_range_new(#{f}, #{l}, #{x})\"\n when Regexp\n src = value.source\n return \"rb_reg_new(#{src.inspect}, #{src.size}, #{value.options})\"\n else\n raise \"Bug! 
no: Unknown literal #{value}:#{value.class}\"\n end\n return nil\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def consume!\n empty_line ||\n name_token ||\n comment_token ||\n whitespace_token ||\n line_token ||\n heredoc_token ||\n string_token ||\n number_token ||\n regex_token ||\n literal_token\n end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def next\n displacement = @file.gets.try(:chomp).try(:to_f)\n return nil unless displacement\n\n ret = @curr_val\n @curr_val += displacement\n ret\n end", "def get_token\n @tokenbuf << read_token if @tokenbuf.length == 0\n return @tokenbuf.shift\n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when text = ss.scan(/#{DIGIT}/) then\n action { [:DIGIT, text.to_i] }\n when text = ss.scan(/#{ADDITION}/) then\n action { [:ADDITION, text] }\n when text = ss.scan(/#{SUBSTRACTION}/) then\n action { [:SUBSTRACTION, text] }\n when text = ss.scan(/#{MULTIPLICATION}/) then\n action { [:MULTIPLICATION, text] }\n when text = ss.scan(/#{DIVISION}/) then\n action { [:DIVISION, text] }\n when text = ss.scan(/#{OPENING_PARANTHESIS}/) then\n action { [:OPENING_PARANTHESIS, text] }\n when text = ss.scan(/#{CLOSING_PARANTHESIS}/) then\n action { [:CLOSING_PARANTHESIS, text] }\n else\n text = ss.string[ss.pos .. 
-1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def next() end", "def next() end", "def next_token\n\t\[email protected]_token\n\tend", "def peek\n @tokens[@position]\n end", "def get_next\n return if eof?\n\n @buffer << @io.gets if @buffer.empty?\n\n until @io.eof?\n line = @io.gets\n next unless line\n\n if @parser.start_new?(line) || @buffer.empty?\n @buffer << line\n break\n else\n @buffer.last << line\n end\n end\n\n return if @buffer.empty?\n @parser.parse(@buffer.slice!(0)) || self.get_next\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.check(/\\n/) then\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil, :option, :inner, :start, :macro, :rule, :group then\n case\n when ss.skip(/options?.*/) then\n [:state, :option]\n when ss.skip(/inner.*/) then\n [:state, :inner]\n when ss.skip(/macros?.*/) then\n [:state, :macro]\n when ss.skip(/rules?.*/) then\n [:state, :rule]\n when ss.skip(/start.*/) then\n [:state, :start]\n when ss.skip(/end/) then\n [:state, :END]\n when ss.skip(/\\A((?:.|\\n)*)class ([\\w:]+.*)/) then\n action { [:class, *matches] }\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/\\s*(\\#.*)/) then\n action { [:comment, text] }\n when (state == :option) && (ss.skip(/\\s+/)) then\n # do nothing\n when (state == :option) && (text = ss.scan(/stub/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/debug/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/do_parse/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/lineno/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/column/i)) then\n action { [:option, text] }\n when (state == :inner) && (text = ss.scan(/.*/)) then\n action { [:inner, text] }\n when (state == :start) && (text = ss.scan(/.*/)) then\n action { [:start, text] }\n when (state == :macro) && (ss.skip(/\\s+(\\w+)\\s+#{RE}/o)) then\n action { [:macro, *matches] }\n when (state == :rule) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:rule, *matches] }\n when (state == :rule) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*\\|\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:group, *matches] }\n when (state == :group) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:groupend, *matches] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :END then\n case\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/.*/) then\n action { [:end, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def run(source, until_token = :invalid, token_count = nil)\n @at_end = false\n @source = source\n @reader = source.each_char\n\n read_next()\n\n while token_count == nil || token_count > 0\n skip_whitespace()\n current = @marker.character\n break unless current\n\n token = Token.new\n token.kind = :invalid\n token.from = @marker.source_index\n token.position = @marker.position.dup\n\n case current\n when ?\", ?'\n read_string(token)\n\n when ?0\n case peek_next()\n when ?x, ?X, ?b, ?B then read_base_number(token)\n else read_number(token)\n end\n\n when ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_number(token)\n\n # dot, double dot, triple dot, and floats beginning with a dot\n when ?.\n token.kind = :dot\n case peek_next()\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9 then read_number(token)\n when ?.\n read_next()\n token.kind = :double_dot\n\n if peek_next() == ?.\n read_next()\n token.kind = :triple_dot\n end\n\n token.value = Token::DESCRIPTORS[token.kind]\n else\n token.value = Token::DESCRIPTORS[token.kind]\n end\n\n when ?_, ?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j, ?k, ?l, ?m, ?n, ?o, ?p,\n ?q, ?r, ?s, ?t, ?u, ?v, ?w, ?x, ?y, ?z, ?A, ?B, ?C, ?D, ?E, ?F, ?G, ?H,\n ?I, ?J, ?K, ?L, ?M, ?N, ?O, ?P, ?Q, ?R, ?S, ?T, ?U, ?V, ?W, ?X, ?Y, ?Z\n read_word(token)\n\n when ?\\n\n token.value = current\n token.kind = :newline\n\n when ??, ?#, ?@, ?$, ?%, ?(, ?), ?[, ?], ?{, ?}, ?^, ?~, ?`, ?\\\\, ?,, ?;\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?=, ?|, ?&, ?:, ?+, ?*\n current << read_next() if peek_next() == current\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?!\n current << read_next() if peek_next() == ?=\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?>, ?<\n case peek_next()\n when ?=, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?-\n case peek_next()\n when ?>, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?/\n case peek_next()\n when ?/ then read_line_comment(token)\n when ?* then read_block_comment(token)\n else\n token.value = Token::DESCRIPTORS[token.kind = :slash]\n read_next()\n end\n\n end # case current\n\n token.to = @marker.source_index\n last_kind = token.kind\n if !(@skip_comments && token.comment?) 
&& !(@skip_newlines && token.newline?)\n if last_kind != :invalid\n @tokens << token\n yield token if block_given?\n else\n raise RuntimeError, \"#{token.position} Invalid token: #{token.inspect}\"\n end\n end\n\n break if until_token == last_kind\n\n read_next()\n token_count -= 1 unless token_count.nil?\n end # while current && token_count > 0\n\n @source = nil\n @reader = nil\n\n self\n end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def set_literal\n <<-CODE\n next_int;\n tuple_put(state, cpu_current_literals(state, c), _int, stack_top());\n CODE\n end", "def _reduce_279(val, _values, result)\n result = lexer.line\n \n result\nend", "def match(ptr, depth = 0)\n case c = ptr.peek(1)\n when '\"', '`'\n start_pos = ptr.pos\n ptr.pos += 1\n AST.new(:string, value: ptr.scan_until(/#{c}/).chop,\n attributes: { type: char_to_type(c) },\n pos: start_pos)\n end\n end", "def next()\n @index += 1\n @string[@index...(@index+1)]\n end", "def next_word\n return unless md = get.match(FORWARD_WORD, cursor)\n self.cursor = md.offset(0).last\n end", "def next_token\n return [false, false] if @src.eos?\n# p @src.rest if @yydebug\n if ret = @src.scan(EM_OPEN_RE)\n @pre << ret\n [:EM_OPEN, ret]\n elsif ret = @src.scan(EM_CLOSE_RE)\n @pre << ret\n [:EM_CLOSE, ret]\n elsif ret = @src.scan(CODE_OPEN_RE)\n @pre << ret\n [:CODE_OPEN, ret]\n elsif ret = @src.scan(CODE_CLOSE_RE)\n @pre << ret\n [:CODE_CLOSE, ret]\n elsif ret = @src.scan(VAR_OPEN_RE)\n @pre << ret\n [:VAR_OPEN, ret]\n elsif ret = @src.scan(VAR_CLOSE_RE)\n @pre << ret\n [:VAR_CLOSE, ret]\n elsif ret = @src.scan(KBD_OPEN_RE)\n @pre << ret\n [:KBD_OPEN, ret]\n elsif ret = @src.scan(KBD_CLOSE_RE)\n @pre << ret\n [:KBD_CLOSE, ret]\n elsif ret = @src.scan(INDEX_OPEN_RE)\n @pre << ret\n [:INDEX_OPEN, ret]\n elsif ret = @src.scan(INDEX_CLOSE_RE)\n @pre << ret\n [:INDEX_CLOSE, ret]\n elsif ret = @src.scan(REF_OPEN_RE)\n @pre << ret\n [:REF_OPEN, ret]\n elsif ret = @src.scan(REF_CLOSE_RE)\n @pre << ret\n [:REF_CLOSE, ret]\n elsif ret = @src.scan(FOOTNOTE_OPEN_RE)\n @pre << ret\n [:FOOTNOTE_OPEN, ret]\n elsif ret = @src.scan(FOOTNOTE_CLOSE_RE)\n @pre << ret\n [:FOOTNOTE_CLOSE, ret]\n elsif ret = @src.scan(VERB_OPEN_RE)\n @pre << ret\n [:VERB_OPEN, ret]\n elsif ret = @src.scan(VERB_CLOSE_RE)\n @pre << ret\n [:VERB_CLOSE, ret]\n elsif ret = @src.scan(BAR_RE)\n @pre << ret\n [:BAR, ret]\n elsif ret = @src.scan(QUOTE_RE)\n @pre << ret\n [:QUOTE, ret]\n elsif ret = @src.scan(SLASH_RE)\n @pre << ret\n [:SLASH, ret]\n elsif ret = @src.scan(BACK_SLASH_RE)\n @pre << ret\n [:BACK_SLASH, ret]\n elsif ret = @src.scan(URL_RE)\n @pre << ret\n [:URL, ret]\n elsif ret = @src.scan(OTHER_RE)\n @pre << ret\n [:OTHER, ret]\n else\n ret = @src.rest\n @pre << ret\n @src.terminate\n [:OTHER, ret]\n end\nend", "def extract_first_expression(lines, consume = T.unsafe(nil), &block); end", "def read_character\n lit = read_literal\n\n return \" \" if lit.empty? && peek_char == \" \"\n CHARACTERS.fetch(lit.downcase) do\n # Return just the first character\n unread(lit[1..-1])\n lit[0,1]\n end\n end", "def get\n @source_index += 1\n\n # Maintain line count.\n prev_char = @source_text[@source_index - 1]\n if @source_index.positive? 
&& prev_char == \"\\n\"\n @line_index += 1\n @col_index = -1\n end\n\n @col_index += 1\n char = if @source_index > @last_index\n # Read past the end of source text.\n END_MARK\n else\n @source_text[@source_index]\n end\n Character.new(char, @line_index, @col_index, @source_index, @source_text)\n end", "def next()\n if has_next()\n @strings[0][1]-=1\n c = @strings[0][0]\n while has_next() and @strings[0][1] == 0\n @strings.shift\n end\n return c\n end\n return \" \"\n end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def peek\n @tokens[@pos]\n end", "def peek_token(n=1)\n n.times{|x| @tokenbuf << read_token if @tokenbuf.length == 0 }\n return @tokenbuf[n-1]\n end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def string_literal\n # StringLiteral ::\n # \" DoubleStringCharactersopt \"\n # ' SingleStringCharactersopt '\n #\n # DoubleStringCharacters ::\n # DoubleStringCharacter DoubleStringCharactersopt\n #\n # SingleStringCharacters ::\n # SingleStringCharacter SingleStringCharactersopt\n #\n # DoubleStringCharacter ::\n # SourceCharacter but not one of \" or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n # SingleStringCharacter ::\n # SourceCharacter but not one of ' or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n if (code = @codes[@pos]) == 0x27 #'\n term = 0x27\n elsif code == 0x22 #\"\n term = 0x22\n else\n return nil\n end\n @pos += 1\n pos0 = @pos\n\n str = []\n while (code = @codes[@pos])\n if code.nil?\n raise ParseError.new(\"no `#{term}' at end of string\", self)\n elsif line_terminator?(code)\n raise ParseError.new(\"string has line terminator in body\", self)\n elsif code == 0x5c #\\\n @pos += 1\n str.push(escape_sequence)\n elsif code == term\n @pos += 1\n return ECMA262::ECMA262String.new(str.compact.pack(\"U*\"))\n else\n @pos += 1\n str.push(code)\n end\n end\n nil\n end", "def peek\n @tok ||= read_token\n end", "def next(pointer); end", "def next(pointer); end", "def next_token\n return if @scanner.eos?\n\n if @scanner.scan(SKIP_PATTERN)\n @column += @scanner[:before].length\n\n new_lines = @scanner[:new_line].delete(\"\\r\")\n 
unless new_lines.empty?\n @lineno += new_lines.length\n @column = 0\n end\n\n @column += @scanner[:after].length\n end\n\n token =\n case\n when try_match(REFERENCE_PATTERN)\n Token.new :REFERENCE, @scanner[:identifier], @lineno, @column\n when try_match(PATH_PATTERN)\n Token.new :PATH, @scanner[:identifier], @lineno, @column\n when try_match(FILTER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :FILTER, \"?\", @lineno, @column\n when try_match(OPEN_BRACKET_PATTERN)\n @state_stack.push Token.new :OPEN_BRACKET, \"[\", @lineno, @column\n @state_stack.last\n when try_match(OPEN_PAREN_PATTERN)\n @state_stack.push Token.new :OPEN_PAREN, \"(\", @lineno, @column\n @state_stack.last\n when try_match(CLOSE_BRACKET_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\"]\", @lineno, @column)\n end\n unless last.type == :OPEN_BRACKET\n raise TokenizeError.unbalanced(\"[\", last.lineno, last.column)\n end\n Token.new :CLOSE_BRACKET, \"]\", @lineno, @column\n when try_match(CLOSE_PAREN_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\")\", @lineno, @column)\n end\n unless last.type == :OPEN_PAREN\n raise TokenizeError.unbalanced(\"(\", last.lineno, last.column)\n end\n Token.new :CLOSE_PAREN, \")\", @lineno, @column\n when try_match(SELF_PATTERN)\n Token.new :SELF, \"@\", @lineno, @column\n when try_match(NUMBER_PATTERN)\n Token.new :NUMBER, BigDecimal.new(@last_captured), @lineno, @column\n when try_match(STRING_PATTERN)\n Token.new :STRING, @scanner[:str], @lineno, @column\n when try_match(TRUE_PATTERN)\n Token.new :BOOLEAN, true, @lineno, @column\n when try_match(FALSE_PATTERN)\n Token.new :BOOLEAN, false, @lineno, @column\n when try_match(COLON_PATTERN)\n Token.new :COLON, \":\", @lineno, @column\n when try_match(COMMA_PATTERN)\n Token.new :COMMA, \",\", @lineno, @column\n when try_match(ADD_PATTERN)\n Token.new :ADD, \"+\", @lineno, @column\n when try_match(SUBTRACT_PATTERN)\n case @tokens.last&.type\n when nil, :OPEN_PAREN, :OPEN_BRACKET, :COMMA, :COLON, :POW, :MOD, :ADD, :SUBTRACT, :MULTIPLY, :DIVIDE\n if @scanner.check(NUMBER_PATTERN) ||\n @scanner.check(REFERENCE_PATTERN) ||\n @scanner.check(SUBTRACT_PATTERN) ||\n @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :UMINUS, \"-\", @lineno, @column\n else\n raise TokenizeError.unexpected(\"-\", @lineno, @column)\n end\n else\n Token.new :SUBTRACT, \"-\", @lineno, @column\n end\n when try_match(MULTIPLY_PATTERN)\n Token.new :MULTIPLY, \"*\", @lineno, @column\n when try_match(DIVIDE_PATTERN)\n Token.new :DIVIDE, \"/\", @lineno, @column\n when try_match(POW_PATTERN)\n Token.new :POW, \"^\", @lineno, @column\n when try_match(MOD_PATTERN)\n Token.new :MOD, \"%\", @lineno, @column\n when try_match(EQUAL_TO_PATTERN)\n Token.new :EQUAL_TO, \"==\", @lineno, @column\n when try_match(NOT_EQUAL_TO_PATTERN)\n Token.new :NOT_EQUAL_TO, \"!=\", @lineno, @column\n when try_match(GREATER_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :GREATER_THAN_OR_EQUAL_TO, \">=\", @lineno, @column\n when try_match(GREATER_THAN_PATTERN)\n Token.new :GREATER_THAN, \">\", @lineno, @column\n when try_match(LESS_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :LESS_THAN_OR_EQUAL_TO, \"<=\", @lineno, @column\n when try_match(LESS_THAN_PATTERN)\n Token.new :LESS_THAN, \"<\", @lineno, @column\n when try_match(AND_PATTERN)\n Token.new :AND, \"&&\", @lineno, @column\n when try_match(OR_PATTERN)\n Token.new :OR, \"||\", @lineno, @column\n when try_match(NOT_PATTERN)\n Token.new :NOT, \"!\", @lineno, @column\n when 
try_match(INTERSECT_PATTERN)\n Token.new :INTERSECT, \"&\", @lineno, @column\n when try_match(UNION_PATTERN)\n Token.new :UNION, \"|\", @lineno, @column\n when try_match(IDENTIFIER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n unless @scanner.check(OPEN_PAREN_PATTERN)\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n Token.new :FUNCTION, @last_captured, @lineno, @column\n else\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n\n @column += @last_captured.length\n @tokens << token\n\n token\n end" ]
[ "0.65581006", "0.6516196", "0.6512108", "0.64047813", "0.63073653", "0.6299561", "0.62135106", "0.6199663", "0.6189973", "0.61801773", "0.61611176", "0.61535245", "0.61292636", "0.6067928", "0.60310274", "0.60310274", "0.60310274", "0.6014107", "0.6001202", "0.59818244", "0.5962569", "0.5947063", "0.5915781", "0.59049356", "0.59039044", "0.5894369", "0.5894369", "0.5894369", "0.589414", "0.5887007", "0.58514696", "0.583793", "0.5814692", "0.5801789", "0.575461", "0.57496065", "0.57493514", "0.57488936", "0.57345587", "0.5731296", "0.5706506", "0.5703749", "0.5699763", "0.5686184", "0.5646719", "0.56423604", "0.564069", "0.56291544", "0.56291544", "0.56291544", "0.56217784", "0.55954504", "0.55904275", "0.5586504", "0.5586504", "0.5586504", "0.5586504", "0.55846477", "0.55842346", "0.55816674", "0.55816674", "0.55816674", "0.55816674", "0.5577274", "0.5577274", "0.55746335", "0.5566382", "0.55537355", "0.55444306", "0.5535074", "0.55341196", "0.55233103", "0.54970586", "0.54970586", "0.54970586", "0.54947764", "0.54947764", "0.54947764", "0.54933816", "0.5492749", "0.54900914", "0.5487004", "0.54756427", "0.54723954", "0.54686815", "0.5465442", "0.54542917", "0.5442926", "0.5442916", "0.544206", "0.54387397", "0.5434612", "0.54335654", "0.54334503", "0.54334503", "0.54334503", "0.5432716", "0.5427248", "0.5425564", "0.5425564", "0.5419204" ]
0.0
-1
Forwards the position after calling peek_lit. This method quickly forwards the scan position to the point reached by peek_lit.
def fwd_after_peek
  @pos = @head_pos
end
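A minimal runnable sketch of the peek-then-commit pattern this method implements. MiniLexer and peek_char are hypothetical stand-ins, assuming (as the record's code suggests) that peeking records where the scan head stopped in @head_pos while leaving @pos untouched, and fwd_after_peek commits the peek by jumping @pos forward:

  class MiniLexer
    def initialize(src)
      @src = src
      @pos = 0
      @head_pos = 0
    end

    # Simplified peek: look one character ahead, remember where the
    # head stopped, but leave @pos where it was.
    def peek_char
      @head_pos = @pos + 1
      @src[@pos]
    end

    # The method under discussion: commit the last peek by moving
    # the cursor to the recorded head position.
    def fwd_after_peek
      @pos = @head_pos
    end
  end

  lexer = MiniLexer.new("abc")
  p lexer.peek_char   # => "a"  (position unchanged)
  lexer.fwd_after_peek
  p lexer.peek_char   # => "b"  (peek committed, cursor advanced)

This avoids re-scanning: after a successful peek the caller advances in O(1) instead of lexing the same input element again.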
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n @pos = pos0\n lit\n end", "def peek_lit_nolt(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and lit.ws?\n end\n @pos = pos0\n lit\n end", "def next\n peek.tap { @position += 1 }\n end", "def advance\n return if @current or @hidden\n @current_pos += 1.5\n if @current_pos >= @lines.last.length then\n @current = true\n end\n end", "def reconsume\n @pos -= 1 if @pos > 0\n end", "def fwd_lit(hint)\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n lit\n end", "def current; peek(0) end", "def retreat\n @position = @position - 1 rescue -1\n self[@position + 1]\n end", "def peek()\n \n end", "def advance\n if @token_queue.any?\n return @token_queue.shift\n end\n\n # Ugly, but dependent on Ragel output. Consider refactoring it somehow.\n klass = self.class\n _lex_trans_keys = klass.send :_lex_trans_keys\n _lex_key_spans = klass.send :_lex_key_spans\n _lex_index_offsets = klass.send :_lex_index_offsets\n _lex_indicies = klass.send :_lex_indicies\n _lex_trans_targs = klass.send :_lex_trans_targs\n _lex_trans_actions = klass.send :_lex_trans_actions\n _lex_to_state_actions = klass.send :_lex_to_state_actions\n _lex_from_state_actions = klass.send :_lex_from_state_actions\n _lex_eof_trans = klass.send :_lex_eof_trans\n\n pe = @source_pts.size + 2\n p, eof = @p, pe\n\n cmd_state = @command_start\n @command_start = false\n\n \n# line 10991 \"lib/parser/lexer.rb\"\nbegin\n\ttestEof = false\n\t_slen, _trans, _keys, _inds, _acts, _nacts = nil\n\t_goto_level = 0\n\t_resume = 10\n\t_eof_trans = 15\n\t_again = 20\n\t_test_eof = 30\n\t_out = 40\n\twhile true\n\tif _goto_level <= 0\n\tif p == pe\n\t\t_goto_level = _test_eof\n\t\tnext\n\tend\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _resume\n\tcase _lex_from_state_actions[ @cs] \n\twhen 97 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = p\n\t\tend\n# line 11019 \"lib/parser/lexer.rb\"\n\tend\n\t_keys = @cs << 1\n\t_inds = _lex_index_offsets[ @cs]\n\t_slen = _lex_key_spans[ @cs]\n\t_wide = ( (@source_pts[p] || 0))\n\t_trans = if ( _slen > 0 && \n\t\t\t_lex_trans_keys[_keys] <= _wide && \n\t\t\t_wide <= _lex_trans_keys[_keys + 1] \n\t\t ) then\n\t\t\t_lex_indicies[ _inds + _wide - _lex_trans_keys[_keys] ] \n\t\t else \n\t\t\t_lex_indicies[ _inds + _slen ]\n\t\t end\n\tend\n\tif _goto_level <= _eof_trans\n\t @cs = _lex_trans_targs[_trans]\n\tif _lex_trans_actions[_trans] != 0\n\tcase _lex_trans_actions[_trans]\n\twhen 28 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 117 then\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 29 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n\twhen 59 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n\twhen 63 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n\twhen 304 then\n# line 1261 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 36 then\n# line 1542 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 38 then\n# line 1562 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 40 then\n# line 1590 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 70 then\n# line 1782 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n\twhen 343 then\n# line 1874 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 1; diag_msg = :ivar_name \t\tend\n\twhen 346 then\n# line 1875 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2; diag_msg = :cvar_name \t\tend\n\twhen 354 then\n# line 1895 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n\twhen 383 then\n# line 1984 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 302 then\n# line 2072 \"lib/parser/lexer.rl\"\n\t\tbegin\n ident_tok = tok; ident_ts = @ts; ident_te = @te; \t\tend\n\twhen 470 then\n# line 2258 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n\twhen 464 then\n# line 2259 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n\twhen 467 then\n# line 2260 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n\twhen 461 then\n# line 2261 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n\twhen 476 then\n# line 2262 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n\twhen 438 then\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n\twhen 453 then\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 446 then\n# line 2321 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 443 then\n# line 2322 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 88 then\n# line 2510 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 7 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 113 then\n# line 1094 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 773;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 5 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 109 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 108 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 115 then\n# line 1036 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 114 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 111 then\n# line 1007 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 112 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 6 then\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 4 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 142 then\n# line 1094 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 773;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 10 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 139 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 138 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 144 then\n# line 1036 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 143 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 141 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 11 then\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 9 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 167 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 166 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 169 then\n# line 1007 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 170 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 173 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 172 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 175 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 182 then\n# line 1094 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 773;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 13 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 179 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 178 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 184 then\n# line 1036 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 183 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 181 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 12 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 186 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 185 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 193 then\n# line 1094 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 773;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 15 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 189 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 188 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 195 then\n# line 1036 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 194 then\n# line 1020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 191 then\n# line 1007 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 192 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 14 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 197 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 196 then\n# line 866 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 766;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 199 then\n# line 1007 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 200 then\n# line 1191 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tREGEXP_OPT, tok(@ts, @te - 1), @ts, @te - 1)\n p = p - 1;\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 201 then\n# line 1178 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n unknown_options = tok.scan(/[^imxouesn]/)\n if unknown_options.any?\n diagnostic :error, :regexp_options,\n { :options => unknown_options.join }\n end\n\n emit(:tREGEXP_OPT)\n @cs = 781;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 16 then\n# line 1330 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 202 then\n# line 1330 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 204 then\n# line 1343 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@@[0-9]/\n diagnostic :error, :cvar_name, { :name => tok }\n end\n\n emit(:tCVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 203 then\n# line 1353 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@[0-9]/\n diagnostic :error, :ivar_name, { :name => tok }\n end\n\n emit(:tIVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 225 then\n# line 1374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(KEYWORDS_BEGIN);\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 211 then\n# line 1382 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tIDENTIFIER)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 18 then\n# line 1386 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n @cs = 781; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 208 then\n# line 1395 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 220 then\n# line 1399 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 20 then\n# line 1405 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if version?(23)\n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, 
@ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 207 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 206 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 224 then\n# line 1374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS_BEGIN);\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 221 then\n# line 1378 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 223 then\n# line 1382 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 218 then\n# line 1386 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n @cs = 781; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 214 then\n# line 1395 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 219 then\n# line 1402 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 212 then\n# line 1415 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 217 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 19 then\n# line 1395 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit_table(PUNCTUATION)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 17 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 210 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 43 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN);\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 44 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 45 then\n\tbegin begin p = (( @te))-1; end\n emit(:tIDENTIFIER)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 22 then\n# line 1430 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n p = p - 1; @cs = 766; \tbegin\n\t\tp += 
1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 227 then\n# line 1436 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 226 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 229 then\n# line 1433 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 228 then\n# line 1436 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 21 then\n# line 1436 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 235 then\n# line 1462 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 234 then\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 233 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 245 then\n# line 1447 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 236 then\n# line 1451 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 241 then\n# line 1462 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 239 then\n# line 1465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 244 then\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 268 then\n# line 1528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 251 then\n# line 1546 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n if @version < 30\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n else\n diagnostic :warning, :ambiguous_regexp, nil, range(tm, tm + 1)\n end\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 257 then\n# line 1574 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 24 then\n# line 1582 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 259 then\n# line 1591 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 39 then\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 246 then\n# line 1616 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 247 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 258 then\n# line 1537 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 254 then\n# line 1563 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 256 then\n# line 1579 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 250 then\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 249 then\n# line 1607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 267 then\n# line 1616 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 25 then\n# line 1607 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 41 then\n# line 1616 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 23 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 71 then\n\tbegin begin p = (( @te))-1; end\n\n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n if @version < 30\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n else\n diagnostic :warning, :ambiguous_regexp, nil, range(tm, tm + 1)\n end\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 72 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 77 then\n\tbegin begin p = (( @te))-1; end\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\telse\n\tbegin begin p = (( @te))-1; end\nend\nend \n\t\t\tend\n\twhen 43 then\n# line 1652 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 272 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 273 then\n# line 1652 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 44 then\n# line 1652 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 42 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 84 then\n\tbegin begin p = (( @te))-1; end\n\n if @cond.active?\n emit(:kDO_COND, 'do'.freeze, @te - 2, @te)\n else\n emit(:kDO, 'do'.freeze, @te - 2, @te)\n end\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 85 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 283 then\n# line 1688 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_do(true)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 276 then\n# line 1694 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 277 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 278 then\n# line 1691 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 281 then\n# line 1694 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 287 then\n# line 1718 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 286 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 295 then\n# line 1710 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 289 then\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 293 then\n# line 1718 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 288 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 92 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 93 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 56 then\n# line 1733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 328 then\n# line 1750 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type = delimiter = tok[0].chr\n p = p - 1; \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 320 then\n# line 1757 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = @source_buffer.slice(@ts).chr, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 54 then\n# line 1764 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 341 then\n# line 1839 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1; p = p - 1;\n emit(:tSYMBEG, tok(@ts, @ts + 1), @ts, @ts + 1)\n \tbegin\n\t\t @cs = 333\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 329 then\n# line 1847 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 340 then\n# line 1855 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1, @ts + 2))\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 68 then\n# line 1869 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 351 then\n# line 1910 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 350 then\n# line 1920 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 
1)\n end\n\t\tend\n\twhen 330 then\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 51 then\n# line 2000 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1;\n\n if version?(18)\n ident = tok(@ts, @te - 2)\n\n emit((@source_buffer.slice(@ts) =~ /[A-Z]/) ? :tCONSTANT : :tIDENTIFIER,\n ident, @ts, @te - 2)\n p = p - 1; # continue as a symbol\n\n if !@static_env.nil? && @static_env.declared?(ident)\n @cs = 781;\n else\n @cs = (arg_or_cmdarg(cmd_state));\n end\n else\n emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n @cs = 766;\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 327 then\n# line 2038 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @version >= 30\n if @lambda_stack.any? && @lambda_stack.last + 1 == @paren_nest\n # To reject `->(...)` like `->...`\n emit(:tDOT3)\n else\n emit(:tBDOT3)\n end\n elsif @version >= 27\n emit(:tBDOT3)\n else\n emit(:tDOT3)\n end\n\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 48 then\n# line 2074 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tIDENTIFIER, ident_tok, ident_ts, ident_te)\n p = ident_te - 1\n\n if !@static_env.nil? && @static_env.declared?(ident_tok) && @version < 25\n @cs = 446;\n else\n @cs = 505;\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 314 then\n# line 2093 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 185\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 55 then\n# line 2109 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 298 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 325 then\n# line 1733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 324 then\n# line 1740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tSTAR, '*'.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 321 then\n# line 1764 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 319 then\n# line 1770 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 348 then\n# line 1830 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 331 then\n# line 1869 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 344 then\n# line 1877 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n diagnostic :error, diag_msg, { name: tok(tm, @te) }, range(tm, @te)\n else\n emit(:tCOLON, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = @ts\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 349 then\n# line 1920 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 355 then\n# line 1926 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 322 then\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 326 then\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n emit(:tBDOT2)\n else\n emit(:tDOT2)\n end\n\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 301 then\n# line 1315 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 311 then\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 313 then\n# line 2093 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 185\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 316 then\n# line 2109 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 53 then\n# line 1770 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 72 then\n# line 1830 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 73 then\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 47 then\n# line 1315 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 52 then\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 67 then\n# line 2109 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 50 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 98 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 105 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\twhen 116 then\n\tbegin begin p = (( @te))-1; end\n\n if @version >= 27\n emit(:tPIPE, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1;\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n p -= 2\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\twhen 120 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 121 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 126 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 127 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if 
!@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 131 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 386 then\n# line 2129 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 387 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 388 then\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 392 then\n# line 2129 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 76 then\n# line 2139 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 396 then\n# line 2144 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n \tbegin\n\t\t @cs = (push_literal(tok, tok, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 395 then\n# line 2154 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 394 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 398 then\n# line 2148 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 397 then\n# line 2154 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 75 then\n# line 2154 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 431 then\n# line 2165 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tLAMBDA, '->'.freeze, @ts, @ts + 2)\n\n @lambda_stack.push @paren_nest\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 85 then\n# line 2206 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 406 then\n# line 2342 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts, nil, false, false, true))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 78 then\n# line 2360 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 427 then\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 453; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 480 then\n# line 2391 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 420 then\n# line 2400 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION);\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 425 then\n# line 2435 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tOP_ASGN, tok(@ts, @te - 1))\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 411 then\n# line 2439 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tEH, '?'.freeze)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 408 then\n# line 2458 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 410 then\n# line 2471 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tSEMI, ';'.freeze)\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 485 then\n# line 2475 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :error, :bare_backslash, nil, range(@ts, @ts + 1)\n p = p - 1;\n end\n\t\tend\n\twhen 405 then\n# line 2481 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 404 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n 
@te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 496 then\n# line 2202 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 333; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 494 then\n# line 2206 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 493 then\n# line 2217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 435 then\n# line 2292 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 482 then\n# line 2352 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 424 then\n# line 2360 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 432 then\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 453; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 488 then\n# line 1315 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 430 then\n# line 2391 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 426 then\n# line 2400 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION);\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 419 then\n# line 2406 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 433 then\n# line 2458 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 417 then\n# line 2465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 423 then\n# line 2481 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 83 then\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 79 then\n# line 2292 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 82 then\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 77 then\n# line 2481 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 80 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 144 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n if tok == '{'.freeze\n @paren_nest += 1\n end\n @command_start = true\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 145 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 333; 
\tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 146 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 148 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 149 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 150 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 151 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg(cmd_state));\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 152 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 153 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 155 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 156 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 157 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 159 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 163 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 164 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 166 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(PUNCTUATION);\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 167 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 95 then\n# line 2511 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 501 then\n# line 2514 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 504 then\n# line 2497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version < 27\n # Ruby before 2.7 doesn't support comments before leading dot.\n # If a line after \"a\" starts with a comment then \"a\" is a self-contained statement.\n # So in that case we emit a special tNL token and start reading the\n # next line as a separate statement.\n #\n # Note: block comments before leading dot are not supported on any version of Ruby.\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 503 then\n# line 2514 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 90 then\n# line 2497 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n if @version < 27\n # Ruby before 2.7 doesn't support comments before leading dot.\n # If a line after \"a\" starts with a comment then \"a\" is a self-contained statement.\n # So in that case we emit a special tNL token and start reading the\n # next line as a separate statement.\n #\n # Note: block comments before leading dot are not supported on any version of Ruby.\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 86 then\n# line 2514 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 91 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 180 then\n\tbegin begin p = (( @te))-1; end\n\n if @version < 27\n # Ruby before 2.7 doesn't support comments before leading dot.\n # If a line after \"a\" starts with a comment then \"a\" is a self-contained statement.\n # So in that case we emit a special tNL token and start reading the\n # next line as a separate statement.\n #\n # Note: block comments before leading dot are not supported on any version of Ruby.\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 182 then\n\tbegin begin p = (( @te))-1; end\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 185; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 507 then\n# line 2524 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; 
begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = (@cs_before_block_comment)\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 506 then\n# line 2532 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :embedded_document, nil,\n range(@eq_begin_s, @eq_begin_s + '=begin'.length)\n end\n\t\tend\n\twhen 106 then\n# line 2542 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 967\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 2 then\n# line 2546 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 98 then\n# line 2549 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin cmd_state = true; p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 99 then\n# line 527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 100 then\n# line 2539 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 105 then\n# line 2542 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 967\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 104 then\n# line 2549 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin cmd_state = true; p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 1 then\n# line 2549 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin cmd_state = true; p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 66 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 110 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 140 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 168 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 174 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 180 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
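# The rewind shown above (`p = @herebody_s - 1`) is what lets a heredoc body
# interleave with the rest of a line-spanning literal: the outer literal is
# lexed first, then the cursor jumps back to the saved body start. The claim
# in the comment is easy to verify in plain Ruby:
#
#   eval(%Q{<<-FOO+"1\nbar\nFOO\n2"})  # => "bar\n1\n2"
#
# (The `- 1` compensates for the `p += 1` that the generated main loop
# performs on its next iteration.)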
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 187 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 190 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
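# The `@newline_s = p` action above only records where a newline begins; the
# layer above the lexer turns byte offsets into human-readable positions. A
# minimal sketch of that mapping, assuming a precomputed list of newline
# offsets (all names here are illustrative):
#
#   def line_and_column(newline_offsets, pos)
#     line = newline_offsets.count { |n| n < pos }
#     col  = line.zero? ? pos : pos - newline_offsets[line - 1] - 1
#     [line + 1, col]
#   end
#
#   offsets = "ab\ncd".each_char.with_index.select { |c, _| c == "\n" }.map(&:last)
#   line_and_column(offsets, 3)  # => [2, 0]   (the "c")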
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 198 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
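# The `@te == pe` guard above fires when input runs out inside an open
# literal, producing the fatal `:string_eof` diagnostic anchored at the
# literal's opening position. A sketch of how this surfaces through the
# public API, assuming the default behaviour of raising on fatal diagnostics:
#
#   require 'parser/current'
#   begin
#     Parser::CurrentRuby.parse('x = <<~END')
#   rescue Parser::SyntaxError => e
#     e.diagnostic.reason  # => :string_eof
#   end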
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 269 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 260 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1591 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 252 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 352 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1910 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 315 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2093 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 185\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 434 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2447 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @paren_nest == 0\n diagnostic :warning, :triple_dot_at_eol, nil, range(@ts, @te - 1)\n end\n\n emit(:tDOT3, '...'.freeze, @ts, @te - 1)\n p = p - 1;\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 508 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a 
newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2524 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = (@cs_before_block_comment)\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 505 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2529 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 107 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2542 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 967\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 3 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2546 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 456 then\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 454 then\n# line 636 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, chars)) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 459 then\n# line 637 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 457 then\n# line 638 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 2); p -= 2 } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 455 then\n# line 639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 2); p -= 2 } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 458 then\n# line 640 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 6); p -= 6 } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 447 then\n# line 644 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Float(chars))) } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 448 then\n# line 645 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars), @ts, @te - 2); p -= 2 } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 449 then\n# line 649 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 451 then\n# line 650 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 450 then\n# line 651 
\"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars), @ts, @te - 6); p -= 6 } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 134 then\n# line 666 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 162 then\n# line 666 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
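# The codepoint loop above decodes a \u{...} escape that may list several
# space-separated codepoints. A condensed sketch of the happy path, without
# the pre-2.4 whitespace validation or diagnostic ranges
# (`decode_unicode_list` is an illustrative name):
#
#   def decode_unicode_list(payload)   # payload = text between \u{ and }
#     payload.split(/[ \t]+/).map do |cp|
#       n = cp.to_i(16)
#       raise RangeError, 'invalid Unicode codepoint (too large)' if n >= 0x110000
#       n.chr(Encoding::UTF_8)
#     end.join
#   end
#
#   decode_unicode_list('41 1F600')  # => "A😀"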
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 373 then\n# line 666 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 118 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 146 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
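# `munge_escape?` asks whether the backslashed character is this literal's
# own delimiter (or a backslash): if so, the escape only neutralises the
# delimiter and, outside of regexps, the backslash itself is dropped. The
# behavioural difference is visible in plain Ruby:
#
#   %q(a\)b)         # => "a)b"     backslash munged away
#   /a\)b/.source    # => "a\\)b"   regexp keeps the escaped form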
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 357 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 121 then\n# line 713 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 149 then\n# line 713 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 360 then\n# line 713 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 123 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 151 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 362 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 120 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
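# The octal branch above (`tok(@escape_s, p).to_i(8) % 0x100`) reduces the
# escape modulo 256, matching MRI, where "\777" wraps around to "\xFF". A
# tiny sketch (the method name is illustrative):
#
#   def decode_octal_escape(digits)
#     (digits.to_i(8) % 0x100).chr
#   end
#
#   decode_octal_escape('777')  # => "\xFF"
#   "\777"                      # => "\xFF"  (MRI agrees)
#
# The neighbouring `@escape = "\x7f"` branch handles \c? / \C-?, which
# denote the DEL control character.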
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 148 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 359 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 137 then\n# line 751 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 165 then\n# line 751 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 376 then\n# line 751 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 136 then\n# line 755 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
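# The hex branch above decodes \xNN via `to_i(16)`, and the character-literal
# handler then emits the result: tCHARACTER on 1.9+, but a plain tINTEGER of
# the first byte when emulating 1.8, where ?a evaluated to 97. A sketch
# (names illustrative):
#
#   def emit_char_literal(value, ruby18:)
#     ruby18 ? [:tINTEGER, value.getbyte(0)] : [:tCHARACTER, value]
#   end
#
#   emit_char_literal('a', ruby18: true)   # => [:tINTEGER, 97]
#   emit_char_literal('a', ruby18: false)  # => [:tCHARACTER, "a"]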
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 164 then\n# line 755 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 375 then\n# line 755 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 131 then\n# line 761 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 159 then\n# line 761 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
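# Editorial aside (illustration only). The action at lexer.rl line 751
# decodes a \xNN escape: tok(@escape_s + 1, p) grabs the hex digits and
# encode_escape turns them into a byte. Hedged examples in plain Ruby:
#
#   "\x41"    # => "A"  (one or two hex digits are accepted, so "\x4"
#             #          is also valid)
#   "\xZ"     # no hex digit after \x => fatal :invalid_hex_escape,
#             #          reported over range(@escape_s - 1, p + 2)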
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 370 then\n# line 761 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 130 then\n# line 765 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 158 then\n# line 765 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 369 then\n# line 765 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 135 then\n# line 771 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 163 then\n# line 771 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 374 then\n# line 771 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 132 then\n# line 785 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
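# Editorial aside (illustration only). The \u escapes decoded above use
# tok(...).to_i(16).chr(Encoding::UTF_8), so the resulting character is
# always UTF-8. Hedged examples:
#
#   "\u0041"     # => "A"
#   "\u{1F600}"  # braced form allows 1..6 hex digits per codepoint
#   "\u12"       # the unbraced form requires exactly four digits
#                # => fatal :invalid_unicode_escape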
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 160 then\n# line 785 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 371 then\n# line 785 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 119 then\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 147 then\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 358 then\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 171 then\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 176 then\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
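# Editorial aside (illustration only). The cases at lexer.rl line 1897
# handle character literals: the decoded escape (or the raw character
# after '?') is emitted as tCHARACTER, except on 1.8 where it degrades
# to the codepoint. Hedged sketch:
#
#   ?a        # 1.9+: tCHARACTER "a"     1.8: tINTEGER 97 (getbyte(0))
#   ?\u0041   # @escape already holds "A", so tCHARACTER "A"
#
# A lone backslash at end of input has nothing to escape and is fatal
# (:escape_eof); the line-817 action resets @escape_s/@escape at the
# start of each escape so stale values never leak between tokens.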
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 57 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 30 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1542 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 32 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1562 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 34 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1590 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 213 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every 
heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1415 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 232 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1433 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 240 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 33 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 271 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 263 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1613 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 282 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1691 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 294 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 290 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1715 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 58 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n 
end\n\t\tend\n\twhen 49 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2074 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tIDENTIFIER, ident_tok, ident_ts, ident_te)\n p = ident_te - 1\n\n if !@static_env.nil? && @static_env.declared?(ident_tok) && @version < 25\n @cs = 446;\n else\n @cs = 505;\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 312 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 393 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 389 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2120 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @in_kwarg\n p = p - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n \tbegin\n\t\t @cs = 185\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 402 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2148 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 399 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2151 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 185\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 486 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 418 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 961\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 101 then\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2539 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 262 then\n# line 
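# Editorial aside (illustration only). The line-857 action restores the
# scan position saved in @herebody_s: heredoc openers are lexed in the
# middle of a line, the bodies are consumed eagerly, and scanning then
# resumes after the last terminator. Hedged sketch:
#
#   foo(<<~A, <<~B)   # "A," and "B)" are lexed first on this line;
#     one             # then both bodies are read, and @herebody_s
#   A                 # points past the final "B" terminator, so the
#     two             # next token (a newline here) is picked up there
#   B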
1059 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1509 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze, @te - 1, @te)\n else\n emit(:tLCURLY, '{'.freeze, @te - 1, @te)\n end\n @command_start = true\n @paren_nest += 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 284 then\n# line 1059 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1675 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE_ARG, '{'.freeze)\n end\n @paren_nest += 1\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 384 then\n# line 1059 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1955 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n @command_start = true\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE, '{'.freeze)\n end\n @paren_nest += 1\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 499 then\n# line 1059 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 2173 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n if tok == '{'.freeze\n @paren_nest += 1\n end\n @command_start = true\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 500 then\n# line 1068 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n current_literal = literal\n if current_literal\n if current_literal.end_interp_brace_and_try_closing\n if version?(18, 19)\n emit(:tRCURLY, '}'.freeze, p - 1, p)\n @cond.lexpop\n @cmdarg.lexpop\n else\n emit(:tSTRING_DEND, '}'.freeze, p - 1, p)\n end\n\n if current_literal.saved_herebody_s\n @herebody_s = current_literal.saved_herebody_s\n end\n\n\n p = p - 1;\n @cs = (next_state_for_literal(current_literal));\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\n @paren_nest -= 1\n \t\tend\n# line 2410 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 781;\n else\n @cs = 511;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 60 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == 
pe ? p - 2 : p) \t\tend\n\twhen 64 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 216 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1415 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 231 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1433 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 243 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 265 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1610 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 280 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1691 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 292 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 318 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 391 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 401 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2148 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 422 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 103 then\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2539 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 237 then\n# line 1261 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1455 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg(cmd_state)); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 332 then\n# line 1261 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 303 then\n# line 1261 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 415 then\n# line 1261 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 144 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n if tok == '{'.freeze\n @paren_nest += 1\n end\n @command_start = true\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 145 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 333; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 146 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 148 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 149 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 150 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 151 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg(cmd_state));\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 152 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 153 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 155 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 156 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 157 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 159 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 163 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 164 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 166 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(PUNCTUATION);\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 167 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 238 then\n# line 1262 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1455 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg(cmd_state)); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 333 then\n# line 1262 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 305 then\n# line 1262 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 416 then\n# line 1262 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 334 then\n# line 1267 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 306 then\n# line 1267 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 335 then\n# line 1268 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 307 then\n# line 1268 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 339 then\n# line 1269 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 310 then\n# line 1269 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 338 then\n# line 1270 
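# Editorial aside (illustration only). Two behaviors shown above, with
# hedged examples:
#
#   foo!        # tm == @te: the '!' suffix belongs to the name => tFID
#   foo!= bar   # suffix not consumed: lexed as tIDENTIFIER "foo",
#               # then rewound (p = tm - 1) so "!=" is re-scanned
#
#   0x          # :empty_numeric (no digits after the base prefix;
#               # 1.8 silently treated the empty octal case as 0)
#   1_          # :trailing_in_number for the dangling '_'
#   08          # :invalid_octal, pinpointing the offending digit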
\"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 309 then\n# line 1270 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 98 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 105 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\twhen 116 then\n\tbegin begin p = (( @te))-1; end\n\n if @version >= 27\n emit(:tPIPE, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1;\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n p -= 2\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\twhen 120 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 121 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @command_start = true\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 126 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 127 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 131 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 336 then\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 308 then\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 337 then\n# line 1276 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1861 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 481 then\n# line 1281 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2356 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT, tok(@ts, tm), @ts, tm)\n p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 261 then\n# line 1287 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1503 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze, @te - 1, @te)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 377 then\n# line 1287 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1969 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 484 then\n# line 1287 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 2443 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK2, '['.freeze)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 487 then\n# line 1293 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2410 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 781;\n else\n @cs = 511;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 253 then\n# line 1300 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1484 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if version?(18)\n emit(:tLPAREN2, '('.freeze, @te - 1, @te)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n @cs = 543; \tbegin\n\t\tp 
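# -- editor's note: the @static_env lookup above is the local-variable
# disambiguation point. A declared local sends the lexer straight to an
# expr_end-like state (446 in this build), so e.g. `a /b/` can lex as a
# division when `a` is a local; an undeclared name goes through
# arg_or_cmdarg and the `/` may open a regexp argument instead. Sketch:
require 'set'
locals = Set.new(%w[a])
tok    = 'a'
next_state = locals.include?(tok) ? :expr_end : :expr_cmdarg  # => :expr_end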
+= 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 266 then\n# line 1300 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN2, '('.freeze)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 274 then\n# line 1300 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1629 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n if version?(18)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 323 then\n# line 1300 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1974 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN, '('.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 428 then\n# line 1300 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 2406 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 429 then\n# line 1310 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2410 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 781;\n else\n @cs = 511;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 71 then\n# line 1782 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 347 then\n# line 1783 \"lib/parser/lexer.rl\"\n\t\tbegin\n new_herebody_s = p \t\tend\n# line 1784 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n tok(@ts, heredoc_e) =~ /^<<(-?)(~?)([\"'`]?)(.*)\\3$/m\n\n indent = !$1.empty? || !$2.empty?\n dedent_body = !$2.empty?\n type = $3.empty? ? 
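# -- editor's note: @cond and @cmdarg are single-integer bit stacks (the
# gem's StackState). Each `(` / `[` pushes a 0 so conditional and
# command-argument tracking restart inside the grouping; the matching close
# pops -- or, for target versions before 2.4, `lexpop`s, which ORs the two
# topmost bits together (assumed here to match old MRI behaviour). A
# minimal reimplementation of the idea:
class BitStack
  def initialize; @bits = 0; end
  def push(v);  @bits = (@bits << 1) | (v ? 1 : 0); end
  def pop;      bit = @bits & 1; @bits >>= 1; bit == 1; end
  def lexpop;   @bits = (@bits >> 1) | (@bits & 1); end
  def active?;  (@bits & 1) == 1; end
end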
'<<\"'.freeze : ('<<'.freeze + $3)\n delimiter = $4\n\n if @version >= 27\n if delimiter.count(\"\\n\") > 0 || delimiter.count(\"\\r\") > 0\n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n elsif @version >= 24\n if delimiter.count(\"\\n\") > 0\n if delimiter.end_with?(\"\\n\")\n diagnostic :warning, :heredoc_id_ends_with_nl, nil, range(@ts, @ts + 1)\n delimiter = delimiter.rstrip\n else\n diagnostic :fatal, :heredoc_id_has_newline, nil, range(@ts, @ts + 1)\n end\n end\n end\n\n if dedent_body && version?(18, 19, 20, 21, 22)\n emit(:tLSHFT, '<<'.freeze, @ts, @ts + 2)\n p = @ts + 1\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (push_literal(type, delimiter, @ts, heredoc_e, indent, dedent_body));\n\n @herebody_s ||= new_herebody_s\n p = @herebody_s - 1\n end\n end\n\t\tend\n\twhen 342 then\n# line 1874 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 1; diag_msg = :ivar_name \t\tend\n# line 1877 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n diagnostic :error, diag_msg, { name: tok(tm, @te) }, range(tm, @te)\n else\n emit(:tCOLON, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = @ts\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 345 then\n# line 1875 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2; diag_msg = :cvar_name \t\tend\n# line 1877 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n diagnostic :error, diag_msg, { name: tok(tm, @te) }, range(tm, @te)\n else\n emit(:tCOLON, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = @ts\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 353 then\n# line 1895 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 380 then\n# line 1984 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1985 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 471 then\n# line 2258 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 465 then\n# line 2259 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 468 then\n# line 2260 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 462 then\n# line 2261 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 477 then\n# line 2262 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 440 then\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 478 
then\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 89 then\n# line 2510 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2511 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 8 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 444 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2322 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 222 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 43;\t\tend\n\twhen 209 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1378 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 44;\t\tend\n\twhen 205 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1382 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 45;\t\tend\n\twhen 26 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1546 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 71;\t\tend\n\twhen 255 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1563 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 72;\t\tend\n\twhen 27 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 77;\t\tend\n\twhen 248 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 78;\t\tend\n\twhen 275 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 84;\t\tend\n\twhen 45 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1652 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 85;\t\tend\n\twhen 296 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1706 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 92;\t\tend\n\twhen 285 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1710 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 93;\t\tend\n\twhen 69 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1830 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 105;\t\tend\n\twhen 385 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1938 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 116;\t\tend\n\twhen 299 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 120;\t\tend\n\twhen 379 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1985 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 121;\t\tend\n\twhen 378 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1991 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 122;\t\tend\n\twhen 74 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 126;\t\tend\n\twhen 297 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1315 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 127;\t\tend\n\twhen 300 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2109 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 131;\t\tend\n\twhen 495 then\n# line 1 
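# -- editor's note: @num_xfrm decouples "which digits matched" from "which
# token to emit". The suffix actions install a lambda (plain integer and
# float in this chunk; the full machine also installs rational/imaginary
# variants for the `r` / `i` suffixes) and the accept action simply calls
# it:
int_xfrm   = ->(chars) { [:tINTEGER, chars] }
float_xfrm = ->(chars) { [:tFLOAT, Float(chars)] }
float_xfrm.call('1.5')  # => [:tFLOAT, 1.5]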
\"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2173 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 144;\t\tend\n\twhen 490 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2202 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 145;\t\tend\n\twhen 498 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2212 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 147;\t\tend\n\twhen 491 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 148;\t\tend\n\twhen 492 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2222 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 149;\t\tend\n\twhen 497 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2226 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 150;\t\tend\n\twhen 489 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2237 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 151;\t\tend\n\twhen 483 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2251 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 152;\t\tend\n\twhen 409 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 153;\t\tend\n\twhen 442 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2309 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 156;\t\tend\n\twhen 81 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 157;\t\tend\n\twhen 412 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2352 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 159;\t\tend\n\twhen 403 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1315 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 163;\t\tend\n\twhen 414 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 164;\t\tend\n\twhen 407 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2400 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 166;\t\tend\n\twhen 413 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2406 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 167;\t\tend\n\twhen 87 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 180;\t\tend\n\twhen 502 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2514 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 182;\t\tend\n\twhen 177 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 124 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
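# -- editor's note: @herebody_s is what lets heredocs interleave with the
# rest of the line. When `<<FOO` is seen mid-line, @herebody_s remembers
# where the body will start, lexing of the current line continues, and at
# the newline the cursor jumps to the body; once nest_and_try_closing
# matches the terminator, the cursor is restored to just past the `<<FOO`
# reference. A toy version of the body scan:
def scan_heredoc(lines, delimiter)
  body = []
  lines.each_with_index do |line, i|
    return [body.join("\n") << "\n", i + 1] if line.strip == delimiter
    body << line
  end
  raise 'unterminated heredoc'
end
# scan_heredoc(['bar', 'FOO'], 'FOO')  # => ["bar\n", 2]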
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 152 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 363 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 129 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
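# -- editor's note: the two one-line actions threaded through these escape
# handlers are the classic control/meta bit tricks: `\C-x` clears the upper
# bits (ord & 0x9f) and `\M-x` sets the high bit (ord | 0x80). Verifiable
# in isolation:
ctrl = ->(c) { (c.ord & 0x9f).chr }
meta = ->(c) { (c.ord | 0x80).chr }
ctrl.call('a').ord  # => 1    (Ctrl-A)
meta.call('a').ord  # => 225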
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 157 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 368 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 122 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
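# -- editor's note: the `?x` character-literal action above is one of the
# clearest version splits in the machine: Ruby 1.8 treats `?a` as the byte
# value, everything later as a one-character string.
value  = 'a'
ruby18 = false  # hypothetical toggle standing in for version?(18)
token  = ruby18 ? [:tINTEGER, value.getbyte(0)] : [:tCHARACTER, value]
# => [:tCHARACTER, "a"]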
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 150 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 361 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 128 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
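# -- editor's note: the `@version >= 27` guard above rejects raw control
# characters written directly after `\C-` / `\M-`; the deliberate gap at
# ordinals 9..13 keeps tab, newline, vertical tab, form feed and carriage
# return legal:
raw_ctrl = ->(ch) { (0..8).cover?(ch.ord) || (14..31).cover?(ch.ord) }
raw_ctrl.call("\t")    # => false (ordinal 9 is allowed)
raw_ctrl.call("\x01")  # => true  -> diagnostic :fatal, :invalid_escape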
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 156 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 367 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 126 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
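# -- editor's note: literal types that support line continuation strip the
# backslash-newline pair with the gsub seen above before the chunk is
# appended, which is how
#   "a\
#   b"
# parses as "ab". The transformation in isolation:
"a\\\nb".gsub("\\\n", '')  # => "ab"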
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 154 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 365 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 133 then\n# line 771 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 785 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 161 then\n# line 771 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 785 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 372 then\n# line 771 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 785 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 116 then\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
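# -- editor's note: the paired :fatal diagnostics above cover the two ways
# a `\u` escape can go wrong -- malformed contents and an unterminated
# `\u{...}` -- plus escape_eof for a lone backslash at end of input. The
# accepted shapes, re-checked with a standalone regexp (an approximation,
# not the machine's exact grammar):
UNICODE_ESC = /\\u(?:\h{4}|\{\h{1,6}(?:[ \t]+\h{1,6})*\})/
UNICODE_ESC.match?('\u{1F600}')  # => true
UNICODE_ESC.match?('\u{12')      # => false -> :fatal here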
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 145 then\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 356 then\n# line 817 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 61 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 215 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1415 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 230 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1433 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 242 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 264 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1610 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 781\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 279 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1691 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 291 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 317 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 390 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 400 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2148 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 421 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 102 then\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2539 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 473 then\n# line 2262 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 437 then\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 452 then\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 445 then\n# line 2321 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 441 then\n# line 2322 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2324 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 270 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 78;\t\tend\n\twhen 35 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n 
\t\tend\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 77;\t\tend\n\twhen 46 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1652 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 85;\t\tend\n\twhen 94 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 180;\t\tend\n\twhen 65 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 98;\t\tend\n\twhen 84 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2206 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 146;\t\tend\n\twhen 93 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 180;\t\tend\n\twhen 37 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1562 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1563 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 72;\t\tend\n\twhen 382 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1984 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2063 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 126;\t\tend\n\twhen 381 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1984 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1315 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 127;\t\tend\n\twhen 474 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2262 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 153;\t\tend\n\twhen 127 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 155 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 366 then\n# line 706 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 125 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 153 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 890 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
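# ---------------------------------------------------------------------
# Editorial aside (hedged sketch). The encode_escape actions above
# implement \C- (control) and \M- (meta) escapes as bit arithmetic on
# the escaped character's codepoint; plain Ruby reproduces the result:
ctrl = 'x'.ord & 0x9f            # 0x78 & 0x9f => 0x18, i.e. Ctrl-X
meta = 'x'.ord | 0x80            # 0x78 | 0x80 => 0xF8, meta bit set
puts ctrl == "\C-x".ord          # => true
puts meta == "\M-x".bytes.first  # => true
# ---------------------------------------------------------------------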
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 364 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 725 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1897 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 781; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 469 then\n# line 2258 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 463 then\n# line 2259 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 466 then\n# line 2260 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 460 then\n# line 2261 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 472 then\n# line 2262 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
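# ---------------------------------------------------------------------
# Editorial aside (hedged sketch). The numeric-literal actions above all
# share one validation body: reject a trailing '_', an empty digit run,
# and the digits 8/9 in octal, then convert with String#to_i (which
# tolerates internal underscores). check_digits below is an illustrative
# name, not the lexer's own API:
def check_digits(digits, base)
  raise ArgumentError, "trailing `_' in number" if digits.end_with?('_')
  raise ArgumentError, 'numeric literal without digits' if digits.empty?
  if base == 8 && (i = digits.index(/[89]/))
    raise ArgumentError, "invalid octal digit at index #{i}"
  end
  digits.to_i(base)
end
puts check_digits('1f', 16)    # => 31
puts check_digits('0755', 8)   # => 493
puts check_digits('1_000', 10) # => 1000; to_i skips the underscore
# check_digits('19', 8) raises, mirroring the :invalid_octal diagnostic.
# ---------------------------------------------------------------------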
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 436 then\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 31 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 857 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1562 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1563 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 72;\t\tend\n\twhen 62 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 98;\t\tend\n\twhen 92 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1220 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 180;\t\tend\n\twhen 479 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2297 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 155;\t\tend\n\twhen 475 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2262 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2297 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 155;\t\tend\n\twhen 439 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2264 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2297 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 155;\t\tend\n# line 23534 \"lib/parser/lexer.rb\"\n\tend\n\tend\n\tend\n\tif _goto_level <= _again\n\tcase _lex_to_state_actions[ @cs] \n\twhen 96 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = nil;\t\tend\n# line 23544 \"lib/parser/lexer.rb\"\n\tend\n\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tp += 1\n\tif p != pe\n\t\t_goto_level = _resume\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _test_eof\n\tif p == eof\n\tif _lex_eof_trans[ @cs] > 0\n\t\t_trans = _lex_eof_trans[ @cs] - 1;\n\t\t_goto_level = _eof_trans\n\t\tnext;\n\tend\n\tend\n\n\tend\n\tif _goto_level <= _out\n\t\tbreak\n\tend\nend\n\tend\n\n# line 286 \"lib/parser/lexer.rl\"\n # %\n\n # Ragel creates a local variable called `testEof` but it doesn't use\n # it in any assignment. This dead code is here to swallow the warning.\n # It has no runtime cost because Ruby doesn't produce any instructions from it.\n if false\n testEof\n end\n\n @p = p\n\n if @token_queue.any?\n @token_queue.shift\n elsif @cs == klass.lex_error\n [ false, [ '$error'.freeze, range(p - 1, p) ] ]\n else\n eof = @source_pts.size\n [ false, [ '$eof'.freeze, range(eof, eof) ] ]\n end\n end", "def test_goto_position_backwards\n @buffer = Buffer.new 'hellow world'\n @buffer.fin\n @buffer.goto_position 5\n assert_eq @buffer.position, 5\n assert_eq @buffer.at, 'w'\n end", "def advance\n if @token_queue.any?\n return @token_queue.shift\n end\n\n # Ugly, but dependent on Ragel output. 
Consider refactoring it somehow.\n klass = self.class\n _lex_trans_keys = klass.send :_lex_trans_keys\n _lex_key_spans = klass.send :_lex_key_spans\n _lex_index_offsets = klass.send :_lex_index_offsets\n _lex_indicies = klass.send :_lex_indicies\n _lex_trans_targs = klass.send :_lex_trans_targs\n _lex_trans_actions = klass.send :_lex_trans_actions\n _lex_to_state_actions = klass.send :_lex_to_state_actions\n _lex_from_state_actions = klass.send :_lex_from_state_actions\n _lex_eof_trans = klass.send :_lex_eof_trans\n\n pe = @source_pts.size + 2\n p, eof = @p, pe\n\n cmd_state = @command_start\n @command_start = false\n\n \n# line 11330 \"lib/parser/lexer.rb\"\nbegin\n\ttestEof = false\n\t_slen, _trans, _keys, _inds, _acts, _nacts = nil\n\t_goto_level = 0\n\t_resume = 10\n\t_eof_trans = 15\n\t_again = 20\n\t_test_eof = 30\n\t_out = 40\n\twhile true\n\tif _goto_level <= 0\n\tif p == pe\n\t\t_goto_level = _test_eof\n\t\tnext\n\tend\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _resume\n\tcase _lex_from_state_actions[ @cs] \n\twhen 97 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = p\n\t\tend\n# line 11358 \"lib/parser/lexer.rb\"\n\tend\n\t_keys = @cs << 1\n\t_inds = _lex_index_offsets[ @cs]\n\t_slen = _lex_key_spans[ @cs]\n\t_wide = ( (@source_pts[p] || 0))\n\t_trans = if ( _slen > 0 && \n\t\t\t_lex_trans_keys[_keys] <= _wide && \n\t\t\t_wide <= _lex_trans_keys[_keys + 1] \n\t\t ) then\n\t\t\t_lex_indicies[ _inds + _wide - _lex_trans_keys[_keys] ] \n\t\t else \n\t\t\t_lex_indicies[ _inds + _slen ]\n\t\t end\n\tend\n\tif _goto_level <= _eof_trans\n\t @cs = _lex_trans_targs[_trans]\n\tif _lex_trans_actions[_trans] != 0\n\tcase _lex_trans_actions[_trans]\n\twhen 29 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 117 then\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 30 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n\twhen 60 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n\twhen 64 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n\twhen 310 then\n# line 1273 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 37 then\n# line 1566 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 39 then\n# line 1586 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 41 then\n# line 1614 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 71 then\n# line 1806 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n\twhen 349 then\n# line 1898 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 1; diag_msg = :ivar_name \t\tend\n\twhen 352 then\n# line 1899 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2; diag_msg = :cvar_name \t\tend\n\twhen 360 then\n# line 1919 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n\twhen 392 then\n# line 2008 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 308 then\n# line 2115 \"lib/parser/lexer.rl\"\n\t\tbegin\n ident_tok = tok; ident_ts = @ts; ident_te = @te; \t\tend\n\twhen 479 then\n# line 2301 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n\twhen 473 then\n# line 2302 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n\twhen 476 then\n# line 2303 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n\twhen 470 then\n# line 2304 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n\twhen 485 then\n# line 2305 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n\twhen 447 then\n# line 2306 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n\twhen 462 then\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 455 then\n# line 2364 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 452 then\n# line 2365 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 89 then\n# line 2575 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 7 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 113 then\n# line 1106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 803;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 5 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 109 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 108 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 115 then\n# line 1048 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 114 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 111 then\n# line 1019 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 112 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 6 then\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 4 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 145 then\n# line 1106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 803;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 10 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 142 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 141 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 147 then\n# line 1048 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 146 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 144 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 11 then\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 9 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 173 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
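# ---------------------------------------------------------------------
# Editorial aside (hedged sketch). The squiggly-heredoc branch above
# leaves the "\\\n" sequence in place so Lexer::Dedenter can dedent the
# body first and join the lines afterwards; with equal indentation the
# observable result matches the comment embedded in the action:
doc = <<~HERE
  1\
  2
HERE
puts doc == "12\n"   # => true; dedented, then joined by the backslash
# ---------------------------------------------------------------------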
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 172 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 175 then\n# line 1019 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 176 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 179 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 178 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 181 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 188 then\n# line 1106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 803;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 13 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 185 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
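# ---- illustrative aside; a sketch using the parser gem's public
# tokenize API (buffer name and sample source are invented). It shows
# the tSTRING_DBEG token that the '#{' action above emits after
# flushing the pending string content:
require 'parser/current'

buf = Parser::Source::Buffer.new('(interp)')
buf.source = '"a#{b}c"'
_ast, _comments, tokens = Parser::CurrentRuby.new.tokenize(buf)
tokens.map(&:first)
# expected roughly: [:tSTRING_BEG, :tSTRING_CONTENT, :tSTRING_DBEG,
#                    :tIDENTIFIER, :tSTRING_DEND, :tSTRING_CONTENT,
#                    :tSTRING_END]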
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 184 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 190 then\n# line 1048 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 189 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 187 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
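# ---- illustrative aside; behavior sketch for the version branch above ----
# On 2.7+ grammars an invalid "#@..." sequence inside a literal is kept
# as plain text; older grammars raised an ivar_name/cvar_name error.
require 'parser/current'
Parser::CurrentRuby.parse('"#@1"')
# => s(:str, "#@1")  expected on a 2.7+ grammar; exact diagnostics on
#    older grammars depend on the requested parser version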
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 12 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 192 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 191 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 199 then\n# line 1106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @command_start = true\n @cs = 803;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 15 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 195 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 194 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 201 then\n# line 1048 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n literal.extend_string(tok, @ts, @te)\n else\n message = tok.start_with?('#@@') ? :cvar_name : :ivar_name\n diagnostic :error, message, { :name => tok(@ts + 1, @te) }, range(@ts + 1, @te)\n end\n end\n\t\tend\n\twhen 200 then\n# line 1032 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 197 then\n# line 1019 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 198 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 14 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 203 then\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 202 then\n# line 874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 796;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 205 then\n# line 1019 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 206 then\n# line 1203 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tREGEXP_OPT, tok(@ts, @te - 1), @ts, @te - 1)\n p = p - 1;\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 207 then\n# line 1190 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n unknown_options = tok.scan(/[^imxouesn]/)\n if unknown_options.any?\n diagnostic :error, :regexp_options,\n { :options => unknown_options.join }\n end\n\n emit(:tREGEXP_OPT)\n @cs = 811;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 16 then\n# line 1342 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 208 then\n# line 1342 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 210 then\n# line 1355 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@@[0-9]/\n diagnostic 
:error, :cvar_name, { :name => tok }\n end\n\n emit(:tCVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 209 then\n# line 1365 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@[0-9]/\n diagnostic :error, :ivar_name, { :name => tok }\n end\n\n emit(:tIVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 231 then\n# line 1386 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(KEYWORDS_BEGIN);\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 217 then\n# line 1394 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tIDENTIFIER)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 18 then\n# line 1398 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n @cs = 811; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 214 then\n# line 1407 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 226 then\n# line 1411 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 20 then\n# line 1417 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if version?(23)\n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 213 then\n# line 1430 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 212 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
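# ---- illustrative aside; sketch of the gvar classification above ----
# Numbered references become tNTH_REF, the special $&/$`/$'/$+ refs
# become tBACK_REF, everything else is a plain tGVAR. Buffer name and
# inputs are invented:
require 'parser/current'

%w[$1 $& $foo].each do |src|
  buf = Parser::Source::Buffer.new('(gvar)')
  buf.source = src
  _, _, tokens = Parser::CurrentRuby.new.tokenize(buf)
  puts "#{src} -> #{tokens.first.first}"
end
# $1 -> tNTH_REF, $& -> tBACK_REF, $foo -> tGVAR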
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 230 then\n# line 1386 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS_BEGIN);\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 227 then\n# line 1390 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 229 then\n# line 1394 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 224 then\n# line 1398 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n @cs = 811; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 220 then\n# line 1407 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 225 then\n# line 1414 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 218 then\n# line 1427 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 223 then\n# line 1430 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 19 then\n# line 1407 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit_table(PUNCTUATION)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 17 then\n# line 1430 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 216 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 43 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN);\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 44 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 45 then\n\tbegin begin p = (( @te))-1; end\n emit(:tIDENTIFIER)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 22 then\n# line 1442 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n p = p - 1; @cs = 796; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 233 then\n# line 1460 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 232 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
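# ---- illustrative aside; not part of the generated machine ----
# tLABEL carries the label name without its trailing colon, matching
# emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1) above. Sample source
# is invented:
require 'parser/current'

buf = Parser::Source::Buffer.new('(label)')
buf.source = 'foo(bar: 1)'
_, _, tokens = Parser::CurrentRuby.new.tokenize(buf)
tokens.find { |type, _| type == :tLABEL }
# => [:tLABEL, ["bar", #<Parser::Source::Range ...>]]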
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 235 then\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 234 then\n# line 1460 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 21 then\n# line 1460 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 241 then\n# line 1486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 240 then\n# line 1492 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 239 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 251 then\n# line 1471 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 242 then\n# line 1475 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 247 then\n# line 1486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 245 then\n# line 1489 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 250 then\n# line 1492 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 274 then\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 257 then\n# line 1570 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n if @version < 30\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n else\n diagnostic :warning, :ambiguous_regexp, nil, range(tm, tm + 1)\n end\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 263 then\n# line 1598 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 25 then\n# line 1606 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 
564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 265 then\n# line 1615 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 40 then\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 252 then\n# line 1640 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 253 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 264 then\n# line 1561 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 260 then\n# line 1587 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 262 then\n# line 1603 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 256 then\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 255 then\n# line 1631 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 273 then\n# line 1640 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 26 then\n# line 1631 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 42 then\n# line 1640 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 24 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 72 then\n\tbegin begin p = (( @te))-1; end\n\n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n if @version < 30\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n else\n diagnostic :warning, :ambiguous_regexp, nil, range(tm, tm + 1)\n end\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 73 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 78 then\n\tbegin begin p = (( @te))-1; end\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\telse\n\tbegin begin p = (( @te))-1; end\nend\nend \n\t\t\tend\n\twhen 44 then\n# line 1676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin 
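# ---- illustrative aside; observing the ambiguity warning above ----
# 'foo /re/' could be a regexp argument or a division; attaching a
# diagnostics consumer (a sketch, with invented names) surfaces the
# warning the action emits:
require 'parser/current'

parser = Parser::CurrentRuby.new
parser.diagnostics.consumer = ->(diag) { puts diag.render }

buf = Parser::Source::Buffer.new('(ambiguous)')
buf.source = "foo /re/\n"
parser.parse(buf)
# prints an "ambiguous ..." warning; the exact wording depends on the
# grammar version (ambiguous_literal before 3.0, ambiguous_regexp after)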
p = @ts - 1\n \tbegin\n\t\t @cs = 495\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 278 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 279 then\n# line 1676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 495\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 45 then\n# line 1676 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 495\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 43 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 85 then\n\tbegin begin p = (( @te))-1; end\n\n if @cond.active?\n emit(:kDO_COND, 'do'.freeze, @te - 2, @te)\n else\n emit(:kDO, 'do'.freeze, @te - 2, @te)\n end\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 86 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 495\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 289 then\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_do(true)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 282 then\n# line 1718 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 283 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 284 then\n# line 1715 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 287 then\n# line 1718 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 293 then\n# line 1742 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 292 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 301 then\n# line 1734 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 295 then\n# line 1736 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 299 then\n# line 1742 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 294 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 93 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 57 then\n# line 1757 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 334 then\n# line 1774 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type = delimiter = tok[0].chr\n p = p - 1; \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 326 then\n# line 1781 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = @source_buffer.slice(@ts).chr, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 55 then\n# line 1788 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 347 then\n# line 1863 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1; p = p - 1;\n emit(:tSYMBEG, tok(@ts, @ts + 1), @ts, @ts + 1)\n \tbegin\n\t\t @cs = 353\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 335 then\n# line 1871 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 346 then\n# line 1879 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1, @ts + 2))\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 69 then\n# line 1893 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 357 then\n# line 1934 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 356 then\n# line 1944 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 
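# ---- illustrative aside; plain symbols as single tokens ----
# A bare symbol is emitted as one tSYMBOL whose value drops the leading
# colon, as in emit(:tSYMBOL, tok(@ts + 1), @ts) above. Names invented:
require 'parser/current'

buf = Parser::Source::Buffer.new('(sym)')
buf.source = ':foo'
_, _, tokens = Parser::CurrentRuby.new.tokenize(buf)
tokens.first
# => [:tSYMBOL, ["foo", #<Parser::Source::Range ...>]]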
1)\n end\n\t\tend\n\twhen 336 then\n# line 2003 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 52 then\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1;\n\n if version?(18)\n ident = tok(@ts, @te - 2)\n\n emit((@source_buffer.slice(@ts) =~ /[A-Z]/) ? :tCONSTANT : :tIDENTIFIER,\n ident, @ts, @te - 2)\n p = p - 1; # continue as a symbol\n\n if !@static_env.nil? && @static_env.declared?(ident)\n @cs = 811;\n else\n @cs = (arg_or_cmdarg(cmd_state));\n end\n else\n emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n @cs = 796;\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 49 then\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tIDENTIFIER, ident_tok, ident_ts, ident_te)\n p = ident_te - 1\n\n if !@static_env.nil? && @static_env.declared?(ident_tok) && @version < 25\n @cs = 466;\n else\n @cs = 526;\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 320 then\n# line 2136 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 187\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 56 then\n# line 2152 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 304 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 330 then\n# line 1757 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 329 then\n# line 1764 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tSTAR, '*'.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 325 then\n# line 1794 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 354 then\n# line 1854 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 337 then\n# line 1893 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 350 then\n# line 1901 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n diagnostic :error, diag_msg, { name: tok(tm, @te) }, range(tm, @te)\n else\n emit(:tCOLON, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = @ts\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 355 then\n# line 1944 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 361 then\n# line 1950 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 327 then\n# line 2003 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 331 then\n# line 2051 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n emit(:tBDOT2)\n else\n emit(:tDOT2)\n end\n\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 332 then\n# line 2062 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Here we scan and conditionally emit \"\\n\":\n # + if it's there\n # + and emitted we do nothing\n # + and not emitted we return `p` to \"\\n\" to process it on the next scan\n # + if it's not there we do nothing\n followed_by_nl = @te - 1 == @newline_s\n nl_emitted = false\n dots_te = followed_by_nl ? @te - 1 : @te\n\n if @version >= 30\n if @lambda_stack.any? 
&& @lambda_stack.last + 1 == @paren_nest\n # To reject `->(...)` like `->...`\n emit(:tDOT3, '...'.freeze, @ts, dots_te)\n else\n emit(:tBDOT3, '...'.freeze, @ts, dots_te)\n\n if @version >= 31 && followed_by_nl && @context.in_def_open_args?\n emit(:tNL, @te - 1, @te)\n nl_emitted = true\n end\n end\n elsif @version >= 27\n emit(:tBDOT3, '...'.freeze, @ts, dots_te)\n else\n emit(:tDOT3, '...'.freeze, @ts, dots_te)\n end\n\n if followed_by_nl && !nl_emitted\n # return \"\\n\" to process it on the next scan\n p = p - 1;\n end\n\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 307 then\n# line 1327 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 317 then\n# line 2133 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 319 then\n# line 2136 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 187\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 322 then\n# line 2152 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 54 then\n# line 1794 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 73 then\n# line 1854 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 74 then\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 48 then\n# line 1327 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
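# ---- illustrative aside; beginless-range tokens ----
# At an expression-beginning position, 2.7+ grammars emit tBDOT2/tBDOT3
# instead of tDOT2/tDOT3, as in the two actions above. Names invented:
require 'parser/current'

buf = Parser::Source::Buffer.new('(range)')
buf.source = '..5'
_, _, tokens = Parser::CurrentRuby.new.tokenize(buf)
tokens.first.first
# => :tBDOT2 expected on a 2.7+ grammar (the AST is s(:irange, nil, ...))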
&& @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 53 then\n# line 2133 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 68 then\n# line 2152 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 51 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 99 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 106 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\twhen 117 then\n\tbegin begin p = (( @te))-1; end\n\n if @version >= 27\n emit(:tPIPE, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1;\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n p -= 2\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\twhen 121 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 540; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 123 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 127 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 128 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 132 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 395 then\n# line 2172 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 396 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 397 then\n# line 2160 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 401 then\n# line 2172 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 77 then\n# line 2182 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 405 then\n# line 2187 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n \tbegin\n\t\t @cs = (push_literal(tok, tok, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 404 then\n# line 2197 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 403 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 407 then\n# line 2191 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 406 then\n# line 2197 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 76 then\n# line 2197 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 564\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 440 then\n# line 2208 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tLAMBDA, '->'.freeze, @ts, @ts + 2)\n\n @lambda_stack.push @paren_nest\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 86 then\n# line 2249 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 415 then\n# line 2385 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts, nil, false, false, true))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 79 then\n# line 2403 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 436 then\n# line 2410 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 489 then\n# line 2434 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 429 then\n# line 2443 
\"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION);\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 434 then\n# line 2478 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tOP_ASGN, tok(@ts, @te - 1))\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 420 then\n# line 2482 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tEH, '?'.freeze)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 417 then\n# line 2501 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 419 then\n# line 2514 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tSEMI, ';'.freeze)\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 494 then\n# line 2518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :error, :bare_backslash, nil, range(@ts, @ts + 1)\n p = p - 1;\n end\n\t\tend\n\twhen 414 then\n# line 2524 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 413 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 505 then\n# line 2245 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 353; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 503 then\n# line 2249 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 502 then\n# line 2260 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 444 then\n# line 2335 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 491 then\n# line 2395 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 433 then\n# line 2403 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 348\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 441 then\n# line 2410 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 497 then\n# line 1327 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if 
!@static_env.nil? && @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 439 then\n# line 2434 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 435 then\n# line 2443 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION);\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 428 then\n# line 2449 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 442 then\n# line 2501 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 426 then\n# line 2508 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 432 then\n# line 2524 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 84 then\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 80 then\n# line 2335 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 83 then\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 78 then\n# line 2524 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 81 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 145 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n if tok == '{'.freeze\n @paren_nest += 1\n end\n @command_start = true\n\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 146 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n 
@cs = 353; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 148 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 149 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 150 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 540; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 151 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 152 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg(cmd_state));\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 153 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 154 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 156 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 157 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 158 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 160 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 164 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 165 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 167 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(PUNCTUATION);\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 168 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 516 then\n# line 2565 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tNL, nil, @newline_s, @newline_s + 1)\n if @version < 27\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tBDOT3)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 95 then\n# line 2576 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 510 then\n# line 2579 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 513 then\n# line 2540 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version < 27\n # Ruby before 2.7 doesn't support comments before leading dot.\n # If a line after \"a\" starts with a comment then \"a\" is a self-contained statement.\n # So in that case we emit a special tNL token and start reading the\n # next line as a separate statement.\n #\n # Note: block comments before leading dot are not supported on any version of Ruby.\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 515 then\n# line 2554 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tNL, nil, @newline_s, @newline_s + 1)\n if @version < 27\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tBDOT2)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 514 then\n# line 2576 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = tm - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 512 then\n# line 2579 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 90 then\n# line 2540 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n if @version < 27\n # Ruby before 2.7 doesn't support comments before leading dot.\n # If a line after \"a\" starts with a comment then \"a\" is a self-contained statement.\n # So in that case we emit a special tNL token and start reading the\n # next line as a separate statement.\n #\n # Note: block comments before leading dot are not supported on any version of Ruby.\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 87 then\n# line 2579 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 91 
then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 181 then\n\tbegin begin p = (( @te))-1; end\n\n if @version < 27\n # Ruby before 2.7 doesn't support comments before leading dot.\n # If a line after \"a\" starts with a comment then \"a\" is a self-contained statement.\n # So in that case we emit a special tNL token and start reading the\n # next line as a separate statement.\n #\n # Note: block comments before leading dot are not supported on any version of Ruby.\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 185 then\n\tbegin begin p = (( @te))-1; end\n emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 187; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 519 then\n# line 2589 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = (@cs_before_block_comment)\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 518 then\n# line 2597 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :embedded_document, nil,\n range(@eq_begin_s, @eq_begin_s + '=begin'.length)\n end\n\t\tend\n\twhen 106 then\n# line 2607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 999\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 2 then\n# line 2611 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 98 then\n# line 2614 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin cmd_state = true; p = p - 1; \tbegin\n\t\t @cs = 803\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 99 then\n# line 528 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 100 then\n# line 2604 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 105 then\n# line 2607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 999\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 104 then\n# line 2614 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin cmd_state = true; p = p - 1; \tbegin\n\t\t @cs = 803\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 1 then\n# line 2614 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin cmd_state = true; p = p - 1; \tbegin\n\t\t @cs = 803\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 67 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n\twhen 110 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 143 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 174 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 180 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 186 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 193 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 196 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 204 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 23 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1446 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @version >= 31\n emit(:tBDOT3, '...'.freeze, @ts, @te - 1)\n emit(:tNL, \"\\n\".freeze, @te - 1, @te)\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n p -= 4;\n p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 275 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 266 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1615 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 258 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 358 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # 
Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1934 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 333 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2062 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Here we scan and conditionally emit \"\\n\":\n # + if it's there\n # + and emitted we do nothing\n # + and not emitted we return `p` to \"\\n\" to process it on the next scan\n # + if it's not there we do nothing\n followed_by_nl = @te - 1 == @newline_s\n nl_emitted = false\n dots_te = followed_by_nl ? @te - 1 : @te\n\n if @version >= 30\n if @lambda_stack.any? && @lambda_stack.last + 1 == @paren_nest\n # To reject `->(...)` like `->...`\n emit(:tDOT3, '...'.freeze, @ts, dots_te)\n else\n emit(:tBDOT3, '...'.freeze, @ts, dots_te)\n\n if @version >= 31 && followed_by_nl && @context.in_def_open_args?\n emit(:tNL, @te - 1, @te)\n nl_emitted = true\n end\n end\n elsif @version >= 27\n emit(:tBDOT3, '...'.freeze, @ts, dots_te)\n else\n emit(:tDOT3, '...'.freeze, @ts, dots_te)\n end\n\n if followed_by_nl && !nl_emitted\n # return \"\\n\" to process it on the next scan\n p = p - 1;\n end\n\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 321 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2136 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 187\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 443 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2490 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @paren_nest == 0\n diagnostic :warning, :triple_dot_at_eol, nil, range(@ts, @te - 1)\n end\n\n emit(:tDOT3, '...'.freeze, @ts, @te - 1)\n p = p - 1;\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 520 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2589 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = 
(@cs_before_block_comment)\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 517 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2594 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 107 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2607 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 999\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 3 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2611 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 465 then\n# line 636 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 463 then\n# line 637 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, chars)) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 468 then\n# line 638 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 466 then\n# line 639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 2); p -= 2 } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 464 then\n# line 640 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 2); p -= 2 } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 467 then\n# line 641 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 6); p -= 6 } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 456 then\n# line 645 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Float(chars))) } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 457 then\n# line 646 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars), @ts, @te - 2); p -= 2 } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 458 then\n# line 650 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 460 then\n# line 651 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 459 then\n# line 652 
\"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars), @ts, @te - 6); p -= 6 } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 137 then\n# line 667 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 168 then\n# line 667 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 382 then\n# line 667 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 118 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 149 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 363 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 121 then\n# line 719 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 152 then\n# line 719 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 366 then\n# line 719 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 123 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 154 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
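# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The branches above implement line continuation: for literals that support
# it, the lexer strips the "\\\n" pair, so
#   "a\
#   b"        # lexes identically to "ab"
# while a squiggly heredoc keeps the raw backslash-newline and leaves the
# work to Lexer::Dedenter, which must still dedent the continued line.
# -----------------------------------------------------------------------------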
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 368 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 120 then\n# line 755 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
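# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The "line 1921" action above lexes character literals such as ?a: the token
# text minus the leading "?" (or the decoded @escape) becomes the value, so
#   ?a   # emits tCHARACTER "a" on modern grammars,
#        # and tINTEGER 97 ("a".getbyte(0)) when the 1.8 grammar is selected.
# -----------------------------------------------------------------------------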
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 151 then\n# line 755 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 365 then\n# line 755 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 140 then\n# line 759 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
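# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The "line 755" action above decodes octal escapes with
# tok(@escape_s, p).to_i(8) % 0x100 — up to three octal digits, reduced
# modulo 256. In plain Ruby:
#   "101".to_i(8)           # => 65,  so "\101" is "A"
#   "777".to_i(8) % 0x100   # => 255, so "\777" wraps to byte 0xFF
# -----------------------------------------------------------------------------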
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 171 then\n# line 759 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
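# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The munge_escape? branch above in action: when the escaped character is the
# literal's own delimiter, plain literals drop the backslash, but a regexp
# keeps it whenever the delimiter is also a regexp metacharacter, so the
# pattern source stays valid:
#   %q(a\)b)   # the string "a)b"  — backslash dropped
#   %r(a\)b)   # the regexp /a\)b/ — "\)" kept, since ")" is a metacharacter
# -----------------------------------------------------------------------------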
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 385 then\n# line 759 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 139 then\n# line 763 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
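# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The "line 759" action above decodes "\xNN": @escape_s points at the "x", so
# tok(@escape_s + 1, p) is only the hex digits, and in plain Ruby
#   "41".to_i(16)   # => 65, the byte for "A", so "\x41" == "A"
# Its sibling at "line 763" fires when no hex digit follows the "x" and
# raises the fatal :invalid_hex_escape diagnostic instead.
# -----------------------------------------------------------------------------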
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 170 then\n# line 763 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 384 then\n# line 763 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 134 then\n# line 769 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
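# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The @version >= 31 branch above covers \c/\C/\m/\M escapes inside regexps:
# from Ruby 3.1 the decoded @escape is substituted rather than the raw text,
# so /\cA/ contributes the control character 0x01. In plain Ruby terms,
#   "A".ord & 0x1f   # => 1, the control-A byte
# -----------------------------------------------------------------------------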
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 165 then\n# line 769 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 379 then\n# line 769 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 133 then\n# line 773 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
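# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The "line 769" action above turns "\uXXXX" into a UTF-8 character:
#   "00e9".to_i(16).chr(Encoding::UTF_8)   # => "é"
# so "\u00e9" in a literal contributes the two UTF-8 bytes of U+00E9.
# -----------------------------------------------------------------------------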
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 164 then\n# line 773 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 378 then\n# line 773 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 138 then\n# line 779 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
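# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# The "line 773"/"line 779" actions above both raise the fatal
# :invalid_unicode_escape diagnostic for malformed codepoints — e.g. a short
# digit run like "\u12x", or bad contents inside the braced "\u{...}" form.
# -----------------------------------------------------------------------------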
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 169 then\n# line 779 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 383 then\n# line 779 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 135 then\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 166 then\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 380 then\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 119 then\n# line 819 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
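# ---- editorial aside (not part of the generated lexer; a minimal sketch) ----
# Two terminal failures are handled above: "line 793" raises
# :unterminated_unicode when a braced "\u{" form never sees its closing "}",
# and "line 819" raises :escape_eof when the input ends right after a
# backslash (e.g. a file whose very last byte is a backslash in a string).
# -----------------------------------------------------------------------------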
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 150 then\n# line 819 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 364 then\n# line 819 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 177 then\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 182 then\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 58 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 31 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1566 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 33 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1586 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 35 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1614 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 219 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1427 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 238 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 246 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1489 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 34 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 277 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1631 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 269 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # 
position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1637 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 288 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1715 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 300 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1736 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 296 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1739 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 59 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1757 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 50 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tIDENTIFIER, ident_tok, ident_ts, ident_te)\n p = ident_te - 1\n\n if !@static_env.nil? 
&& @static_env.declared?(ident_tok) && @version < 25\n @cs = 466;\n else\n @cs = 526;\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 318 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2133 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 402 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2160 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 398 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2163 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @in_kwarg\n p = p - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n \tbegin\n\t\t @cs = 187\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 411 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2191 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 408 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2194 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 187\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 495 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2508 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 427 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2511 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 991\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 101 then\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2604 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 268 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1533 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze, @te - 1, @te)\n else\n emit(:tLCURLY, '{'.freeze, @te - 1, @te)\n end\n 
@command_start = true\n @paren_nest += 1\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 290 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE_ARG, '{'.freeze)\n end\n @paren_nest += 1\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 393 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n @command_start = true\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE, '{'.freeze)\n end\n @paren_nest += 1\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 508 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 2216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n if tok == '{'.freeze\n @paren_nest += 1\n end\n @command_start = true\n\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 509 then\n# line 1080 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n current_literal = literal\n if current_literal\n if current_literal.end_interp_brace_and_try_closing\n if version?(18, 19)\n emit(:tRCURLY, '}'.freeze, p - 1, p)\n @cond.lexpop\n @cmdarg.lexpop\n else\n emit(:tSTRING_DEND, '}'.freeze, p - 1, p)\n end\n\n if current_literal.saved_herebody_s\n @herebody_s = current_literal.saved_herebody_s\n end\n\n\n p = p - 1;\n @cs = (next_state_for_literal(current_literal));\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\n @paren_nest -= 1\n \t\tend\n# line 2453 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 811;\n else\n @cs = 532;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 61 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 65 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 222 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1427 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 237 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 249 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1489 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 271 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 286 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1715 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 298 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1736 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 324 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2133 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 400 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2160 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 410 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2191 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 431 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2508 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 103 then\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2604 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 243 then\n# line 1273 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1479 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg(cmd_state)); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 338 then\n# line 1273 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 309 then\n# line 1273 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 424 then\n# line 1273 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 145 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n if tok == '{'.freeze\n @paren_nest += 1\n end\n @command_start = true\n\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 146 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 353; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 148 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 149 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 150 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 540; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 151 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 152 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg(cmd_state));\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 153 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 154 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 156 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 157 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 158 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 160 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 164 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 165 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 167 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(PUNCTUATION);\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 168 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 244 then\n# line 1274 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1479 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg(cmd_state)); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 339 then\n# line 1274 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 311 then\n# line 1274 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 425 then\n# line 1274 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2417 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 340 then\n# line 1279 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 312 then\n# line 1279 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 341 then\n# line 1280 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 313 then\n# line 1280 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 345 then\n# line 1281 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 316 then\n# line 1281 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 344 then\n# line 1282 
\"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 315 then\n# line 1282 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 99 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 106 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n\twhen 117 then\n\tbegin begin p = (( @te))-1; end\n\n if @version >= 27\n emit(:tPIPE, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1;\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n p -= 2\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\twhen 121 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 540; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 123 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @command_start = true\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 127 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 128 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg(cmd_state)); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 132 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 342 then\n# line 1283 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 314 then\n# line 1283 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 343 then\n# line 1288 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1885 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 490 then\n# line 1293 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2399 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT, tok(@ts, tm), @ts, tm)\n p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 267 then\n# line 1299 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1527 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze, @te - 1, @te)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 386 then\n# line 1299 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1993 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 493 then\n# line 1299 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 2486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK2, '['.freeze)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 496 then\n# line 1305 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2453 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 811;\n else\n @cs = 532;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 259 then\n# line 1312 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1508 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if version?(18)\n emit(:tLPAREN2, '('.freeze, @te - 1, @te)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n @cs = 564; \tbegin\n\t\tp 
+= 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 272 then\n# line 1312 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1521 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN2, '('.freeze)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 280 then\n# line 1312 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1653 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n if version?(18)\n @cs = 803; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 328 then\n# line 1312 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 1998 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN, '('.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 437 then\n# line 1312 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n\n if version?(18)\n @command_start = true\n end\n \t\tend\n# line 2449 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 438 then\n# line 1322 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2453 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 811;\n else\n @cs = 532;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 72 then\n# line 1806 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 353 then\n# line 1807 \"lib/parser/lexer.rl\"\n\t\tbegin\n new_herebody_s = p \t\tend\n# line 1808 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n tok(@ts, heredoc_e) =~ /^<<(-?)(~?)([\"'`]?)(.*)\\3$/m\n\n indent = !$1.empty? || !$2.empty?\n dedent_body = !$2.empty?\n type = $3.empty? ? 
'<<\"'.freeze : ('<<'.freeze + $3)\n delimiter = $4\n\n if @version >= 27\n if delimiter.count(\"\\n\") > 0 || delimiter.count(\"\\r\") > 0\n diagnostic :error, :unterminated_heredoc_id, nil, range(@ts, @ts + 1)\n end\n elsif @version >= 24\n if delimiter.count(\"\\n\") > 0\n if delimiter.end_with?(\"\\n\")\n diagnostic :warning, :heredoc_id_ends_with_nl, nil, range(@ts, @ts + 1)\n delimiter = delimiter.rstrip\n else\n diagnostic :fatal, :heredoc_id_has_newline, nil, range(@ts, @ts + 1)\n end\n end\n end\n\n if dedent_body && version?(18, 19, 20, 21, 22)\n emit(:tLSHFT, '<<'.freeze, @ts, @ts + 2)\n p = @ts + 1\n @cs = 564; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (push_literal(type, delimiter, @ts, heredoc_e, indent, dedent_body));\n\n @herebody_s ||= new_herebody_s\n p = @herebody_s - 1\n end\n end\n\t\tend\n\twhen 348 then\n# line 1898 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 1; diag_msg = :ivar_name \t\tend\n# line 1901 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n diagnostic :error, diag_msg, { name: tok(tm, @te) }, range(tm, @te)\n else\n emit(:tCOLON, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = @ts\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 351 then\n# line 1899 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2; diag_msg = :cvar_name \t\tend\n# line 1901 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @version >= 27\n diagnostic :error, diag_msg, { name: tok(tm, @te) }, range(tm, @te)\n else\n emit(:tCOLON, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = @ts\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 359 then\n# line 1919 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 389 then\n# line 2008 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2009 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 540; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 480 then\n# line 2301 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 474 then\n# line 2302 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 477 then\n# line 2303 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 471 then\n# line 2304 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 486 then\n# line 2305 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 449 then\n# line 2306 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 487 
then\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 8 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 453 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2365 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 228 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1386 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 43;\t\tend\n\twhen 215 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1390 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 44;\t\tend\n\twhen 211 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1394 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 45;\t\tend\n\twhen 27 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1570 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 72;\t\tend\n\twhen 261 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1587 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 28 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 78;\t\tend\n\twhen 254 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1631 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 79;\t\tend\n\twhen 281 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1663 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 85;\t\tend\n\twhen 46 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 86;\t\tend\n\twhen 302 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1730 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 93;\t\tend\n\twhen 291 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1734 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 94;\t\tend\n\twhen 70 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1854 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 106;\t\tend\n\twhen 394 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1962 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 117;\t\tend\n\twhen 305 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2003 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 121;\t\tend\n\twhen 388 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2009 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 122;\t\tend\n\twhen 387 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2015 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 123;\t\tend\n\twhen 75 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 127;\t\tend\n\twhen 303 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1327 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 128;\t\tend\n\twhen 306 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2152 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 132;\t\tend\n\twhen 504 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 145;\t\tend\n\twhen 499 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2245 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 146;\t\tend\n\twhen 
507 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2255 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 148;\t\tend\n\twhen 500 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2260 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 149;\t\tend\n\twhen 501 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2265 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 150;\t\tend\n\twhen 506 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2269 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 151;\t\tend\n\twhen 498 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2280 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 152;\t\tend\n\twhen 492 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2294 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 153;\t\tend\n\twhen 418 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 154;\t\tend\n\twhen 451 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2352 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 157;\t\tend\n\twhen 82 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 158;\t\tend\n\twhen 421 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2395 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 160;\t\tend\n\twhen 412 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1327 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 164;\t\tend\n\twhen 423 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2417 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 165;\t\tend\n\twhen 416 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2443 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 167;\t\tend\n\twhen 422 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2449 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 168;\t\tend\n\twhen 88 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2540 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 181;\t\tend\n\twhen 511 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2579 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 185;\t\tend\n\twhen 183 then\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 957 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 124 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 155 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 369 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 131 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 162 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 376 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 122 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 153 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 367 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 130 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 161 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 375 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 126 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 157 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 371 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 129 then\n# line 748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 160 then\n# line 748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 374 then\n# line 748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 136 then\n# line 779 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
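# A worked example of the \\c/\\C- and \\M- actions above: ?\\C-a maps\n # 'a' (0x61) to 0x61 & 0x9f == 0x01, ?\\M-a maps it to 0x61 | 0x80 == 0xE1,\n # and the combined ?\\C-\\M-a yields (0x61 & 0x9f) | 0x80 == 0x81.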
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 167 then\n# line 779 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 381 then\n# line 779 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 116 then\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 819 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 148 then\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 819 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 362 then\n# line 825 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 819 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 62 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 221 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1427 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 236 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 248 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1489 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 270 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 811\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 285 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1715 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 297 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1736 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 323 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2133 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 399 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2160 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 409 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2191 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 430 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2508 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 102 then\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2604 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 482 then\n# line 2305 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 446 then\n# line 2306 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 461 then\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 454 then\n# line 2364 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 644 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 450 then\n# line 2365 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 644 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2367 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 276 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 501 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1631 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 79;\t\tend\n\twhen 36 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 78;\t\tend\n\twhen 47 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 86;\t\tend\n\twhen 94 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2540 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 181;\t\tend\n\twhen 66 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 1757 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 99;\t\tend\n\twhen 85 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2249 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 147;\t\tend\n\twhen 93 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2540 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 181;\t\tend\n\twhen 38 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1586 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1587 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 391 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2008 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 127;\t\tend\n\twhen 390 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2008 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1327 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 128;\t\tend\n\twhen 483 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2305 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 154;\t\tend\n\twhen 127 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 158 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 372 then\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n\n if @version >= 30 && (codepoint == 117 || codepoint == 85) # 'u' or 'U'\n diagnostic :fatal, :invalid_escape\n end\n\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 125 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 156 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? 
&& @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 370 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = @source_buffer.slice(p - 1).chr\n\n if @version >= 27 && ((0..8).include?(@escape.ord) || (14..31).include?(@escape.ord))\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 132 then\n# line 741 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
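# A minimal sketch of the line-continuation rule described in the surrounding
# comments: in heredocs, regexps and the other literals that support it, a
# backslash at the end of a physical line splices the next line on, so the
# emitted token text has every backslash-newline pair deleted. The helper
# name below is hypothetical, not part of the generated machine.
def strip_line_continuations(token_text)
  token_text.gsub("\\\n".freeze, ''.freeze)
end

strip_line_continuations("a\\\nb") # => "ab"  ("a\<newline>b" lexes as "ab")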
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 163 then\n# line 741 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
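# A minimal sketch of the tCHARACTER/tINTEGER split performed by the
# character-literal actions in this machine (helper and argument names are
# hypothetical): Ruby 1.8 evaluates ?a to the byte value, 1.9+ to a
# one-character string, so only the 1.8 target emits an integer token.
def char_literal_token(value, ruby18)
  ruby18 ? [:tINTEGER, value.getbyte(0)] : [:tCHARACTER, value]
end

char_literal_token('a', true)  # => [:tINTEGER, 97]
char_literal_token('a', false) # => [:tCHARACTER, "a"]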
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 377 then\n# line 741 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 128 then\n# line 748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
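# The two chained actions above (ord & 0x9f, then ord | 0x80) are the byte
# transforms behind \C-x / \cx and \M-x escapes. A standalone sketch with
# hypothetical helper names; \M-\C-x applies both, matching the chain:
def control(ch)
  (ch.ord & 0x9f).chr
end

def meta(ch)
  (ch.ord | 0x80).chr
end

control('a').ord       # => 0x01  (\C-a)
meta('a').ord          # => 0xE1  (\M-a)
meta(control('x')).ord # => 0x98  (\M-\C-x)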
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 159 then\n# line 748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 898 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp? && @version >= 31 && %w[c C m M].include?(escaped_char)\n # Ruby >= 3.1 escapes \\c- and \\m chars, that's the only escape sequence\n # supported by regexes so far, so it needs a separate branch.\n current_literal.extend_string(@escape, @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
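# Sketch of the hexadecimal escape reduction seen above, where
# encode_escape(tok(p - 2, p).to_i(16)) turns the two hex digits just
# consumed into a single byte (standalone helper, name is hypothetical):
def decode_hex_escape(two_hex_digits)
  two_hex_digits.to_i(16).chr(Encoding::BINARY)
end

decode_hex_escape('41') # => "A"
decode_hex_escape('9b') # => "\x9B"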
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 373 then\n# line 748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(p - 2, p).to_i(16)) \t\tend\n# line 731 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 735 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 811; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 478 then\n# line 2301 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 472 then\n# line 2302 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 475 then\n# line 2303 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 469 then\n# line 2304 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 481 then\n# line 2305 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 445 then\n# line 2306 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2308 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 32 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 865 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1586 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1587 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 63 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1757 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 99;\t\tend\n\twhen 92 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1229 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1232 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2540 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 181;\t\tend\n\twhen 488 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2340 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 156;\t\tend\n\twhen 484 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2305 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2340 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 156;\t\tend\n\twhen 448 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2306 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2307 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2340 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 156;\t\tend\n# line 24782 \"lib/parser/lexer.rb\"\n\tend\n\tend\n\tend\n\tif _goto_level <= _again\n\tcase _lex_to_state_actions[ @cs] \n\twhen 96 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = nil;\t\tend\n# line 24792 \"lib/parser/lexer.rb\"\n\tend\n\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tp += 1\n\tif p != pe\n\t\t_goto_level = _resume\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _test_eof\n\tif p == eof\n\tif _lex_eof_trans[ @cs] > 0\n\t\t_trans = _lex_eof_trans[ @cs] - 1;\n\t\t_goto_level = _eof_trans\n\t\tnext;\n\tend\n\tend\n\n\tend\n\tif _goto_level <= _out\n\t\tbreak\n\tend\nend\n\tend\n\n# line 286 \"lib/parser/lexer.rl\"\n # %\n\n # Ragel creates a local variable called `testEof` but it doesn't use\n # it in any assignment. This dead code is here to swallow the warning.\n # It has no runtime cost because Ruby doesn't produce any instructions from it.\n if false\n testEof\n end\n\n @p = p\n\n if @token_queue.any?\n @token_queue.shift\n elsif @cs == klass.lex_error\n [ false, [ '$error'.freeze, range(p - 1, p) ] ]\n else\n eof = @source_pts.size\n [ false, [ '$eof'.freeze, range(eof, eof) ] ]\n end\n end", "def forward(offset = 1)\n self[offset]\n @buffer.slice!(0, offset)\n end", "def peek; end", "def peek; end", "def advance\n if @token_queue.any?\n return @token_queue.shift\n end\n\n # Ugly, but dependent on Ragel output. 
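# The numeric actions above record a base (0x => 16, 0b => 2, 0o or a leading
# 0 => 8, otherwise 10) and the digit span, then validate before conversion.
# A self-contained sketch of those checks, raising instead of emitting
# diagnostics with source ranges as the real code does:
def parse_int_literal(digits, base)
  raise ArgumentError, "trailing `_' in number" if digits.end_with?('_')
  raise ArgumentError, 'numeric literal without digits' if digits.empty?
  if base == 8 && (i = digits.index(/[89]/))
    raise ArgumentError, "invalid octal digit at offset #{i}"
  end
  digits.to_i(base) # String#to_i skips underscores between digits
end

parse_int_literal('ff', 16)    # => 255
parse_int_literal('1_000', 10) # => 1000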
Consider refactoring it somehow.\n klass = self.class\n _lex_trans_keys = klass.send :_lex_trans_keys\n _lex_key_spans = klass.send :_lex_key_spans\n _lex_index_offsets = klass.send :_lex_index_offsets\n _lex_indicies = klass.send :_lex_indicies\n _lex_trans_targs = klass.send :_lex_trans_targs\n _lex_trans_actions = klass.send :_lex_trans_actions\n _lex_to_state_actions = klass.send :_lex_to_state_actions\n _lex_from_state_actions = klass.send :_lex_from_state_actions\n _lex_eof_trans = klass.send :_lex_eof_trans\n\n pe = @source_pts.size + 2\n p, eof = @p, pe\n\n @command_state = (@cs == klass.lex_en_expr_value ||\n @cs == klass.lex_en_line_begin)\n\n \n# line 10834 \"lib/parser/lexer.rb\"\nbegin\n\ttestEof = false\n\t_slen, _trans, _keys, _inds, _acts, _nacts = nil\n\t_goto_level = 0\n\t_resume = 10\n\t_eof_trans = 15\n\t_again = 20\n\t_test_eof = 30\n\t_out = 40\n\twhile true\n\tif _goto_level <= 0\n\tif p == pe\n\t\t_goto_level = _test_eof\n\t\tnext\n\tend\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _resume\n\tcase _lex_from_state_actions[ @cs] \n\twhen 89 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = p\n\t\tend\n# line 10862 \"lib/parser/lexer.rb\"\n\tend\n\t_keys = @cs << 1\n\t_inds = _lex_index_offsets[ @cs]\n\t_slen = _lex_key_spans[ @cs]\n\t_wide = ( (@source_pts[p] || 0))\n\t_trans = if ( _slen > 0 && \n\t\t\t_lex_trans_keys[_keys] <= _wide && \n\t\t\t_wide <= _lex_trans_keys[_keys + 1] \n\t\t ) then\n\t\t\t_lex_indicies[ _inds + _wide - _lex_trans_keys[_keys] ] \n\t\t else \n\t\t\t_lex_indicies[ _inds + _slen ]\n\t\t end\n\tend\n\tif _goto_level <= _eof_trans\n\t @cs = _lex_trans_targs[_trans]\n\tif _lex_trans_actions[_trans] != 0\n\tcase _lex_trans_actions[_trans]\n\twhen 28 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 108 then\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 29 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n\twhen 59 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n\twhen 63 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n\twhen 292 then\n# line 1227 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 36 then\n# line 1496 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 38 then\n# line 1512 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 40 then\n# line 1540 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 69 then\n# line 1730 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n\twhen 334 then\n# line 1810 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n\twhen 363 then\n# line 1879 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 290 then\n# line 1934 \"lib/parser/lexer.rl\"\n\t\tbegin\n ident_tok = tok; ident_ts = @ts; ident_te = @te; \t\tend\n\twhen 449 then\n# line 2115 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n\twhen 443 then\n# line 2116 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n\twhen 446 then\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n\twhen 440 then\n# line 2118 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n\twhen 455 then\n# line 2119 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n\twhen 417 then\n# line 2120 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n\twhen 432 then\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 425 then\n# line 2178 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 422 then\n# line 2179 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 85 then\n# line 2337 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 7 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 105 then\n# line 1065 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @cs = 759;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 5 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 101 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
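# Conceptual model of the heredoc close attempt in the action above: each time
# a newline is reached, the body line that just ended (@herebody_s up to the
# newline) is compared against the terminator; <<- and <<~ additionally allow
# the terminator to be indented. Hypothetical standalone helper:
def heredoc_line_closes?(line, terminator, allow_indent:)
  candidate = allow_indent ? line.sub(/\A[ \t]*/, ''.freeze) : line
  candidate.chomp == terminator
end

heredoc_line_closes?("  HERE\n", 'HERE', allow_indent: true)  # => true
heredoc_line_closes?("  HERE\n", 'HERE', allow_indent: false) # => false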
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 100 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 106 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 103 then\n# line 989 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 104 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 6 then\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 4 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 133 then\n# line 1065 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @cs = 759;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 10 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 130 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 129 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 134 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 132 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 11 then\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 9 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 157 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 156 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 159 then\n# line 989 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 160 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 163 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 162 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 165 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 172 then\n# line 1065 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @cs = 759;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 13 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 169 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
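# Sketch of the words?/extend_space distinction at the end of the action
# above: in %w/%i literals a newline only separates elements, while in strings
# and heredocs an unclosed line keeps its literal newline in the token value.
# Hypothetical reducer over already-scanned parts:
def append_newline(parts, words_literal)
  words_literal ? parts : parts << "\n"
end

append_newline(%w[a b], true) # => ["a", "b"]   (separator only)
append_newline(['a'], false)  # => ["a", "\n"]  (newline is content)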
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 168 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 173 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 171 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 12 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 175 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
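# Hedged sketch of the two-character lookahead used above for quoted hash
# labels ("key": value, Ruby >= 2.2). When a string closes, the lexer peeks at
# the closing quote plus the next character; '":' turns the token into
# tLABEL_END rather than a plain string end (the real check also consults the
# cond stack, omitted here). Offsets in the examples are hypothetical:
def label_end?(source, close_quote_pos, version)
  version >= 22 && source[close_quote_pos, 2] == '":'
end

label_end?('x = {"a": 1}', 7, 22) # => true
label_end?('x = "a" + b',  6, 22) # => false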
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 174 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 182 then\n# line 1065 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n @cs = 759;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 15 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 178 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
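# Why the tSTRING_DBEG action above parks @herebody_s: inside #{...} the lexer
# returns to ordinary expression state, and a heredoc opened *within* the
# interpolation needs its own body pointer; the saved value is restored when
# the matching '}' closes the brace. Conceptual save/restore (hypothetical
# class, not the gem's API):
class InterpHerebody
  def initialize
    @saved = []
  end

  # at tSTRING_DBEG ('#{'): stash the enclosing heredoc's body position
  def enter_interp(herebody_s)
    @saved.push(herebody_s)
  end

  # at the matching '}': resume scanning the enclosing heredoc body
  def leave_interp
    @saved.pop
  end
end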
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 177 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 183 then\n# line 1002 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 180 then\n# line 989 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 181 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 14 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 185 then\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 184 then\n# line 848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 752;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 187 then\n# line 989 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 188 then\n# line 1157 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tREGEXP_OPT, tok(@ts, @te - 1), @ts, @te - 1)\n p = p - 1;\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 189 then\n# line 1144 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n unknown_options = tok.scan(/[^imxouesn]/)\n if unknown_options.any?\n diagnostic :error, :regexp_options,\n { :options => unknown_options.join }\n end\n\n emit(:tREGEXP_OPT)\n @cs = 767;\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 16 then\n# line 1286 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 190 then\n# line 1286 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 192 then\n# line 1299 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@@[0-9]/\n diagnostic :error, :cvar_name, { :name => tok }\n end\n\n emit(:tCVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 191 then\n# line 1309 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@[0-9]/\n diagnostic :error, :ivar_name, { :name => tok }\n end\n\n emit(:tIVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 213 then\n# line 1330 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(KEYWORDS_BEGIN);\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 199 then\n# line 1338 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tIDENTIFIER)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 18 then\n# line 1342 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n @cs = 767; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 196 then\n# line 1351 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 208 then\n# line 1355 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 20 then\n# line 1361 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if version?(23)\n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, 
@ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 195 then\n# line 1374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 194 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 212 then\n# line 1330 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS_BEGIN);\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 209 then\n# line 1334 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 211 then\n# line 1338 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 206 then\n# line 1342 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n @cs = 767; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 202 then\n# line 1351 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 207 then\n# line 1358 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 200 then\n# line 1371 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 205 then\n# line 1374 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 19 then\n# line 1351 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit_table(PUNCTUATION)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 17 then\n# line 1374 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 198 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 39 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN);\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 40 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 41 then\n\tbegin begin p = (( @te))-1; end\n emit(:tIDENTIFIER)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 22 then\n# line 1386 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n p = p - 1; @cs = 752; \tbegin\n\t\tp += 
1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 215 then\n# line 1392 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 214 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 217 then\n# line 1389 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 216 then\n# line 1392 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 21 then\n# line 1392 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 223 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 222 then\n# line 1424 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 221 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 233 then\n# line 1403 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 224 then\n# line 1407 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 229 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 227 then\n# line 1421 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 232 then\n# line 1424 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 256 then\n# line 1482 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 239 then\n# line 1500 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 245 then\n# line 1524 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 24 then\n# line 1532 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 247 then\n# line 1541 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 39 then\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 234 then\n# line 1566 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 235 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 246 then\n# line 1491 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 242 then\n# line 1513 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 244 then\n# line 1529 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 238 then\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 237 then\n# line 1557 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 255 then\n# line 1566 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 25 then\n# line 1557 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 41 then\n# line 1566 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 23 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 67 then\n\tbegin begin p = (( @te))-1; end\n\n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 68 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 73 then\n\tbegin begin p = (( @te))-1; end\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\telse\n\tbegin begin p = (( @te))-1; end\nend\nend \n\t\t\tend\n\twhen 43 then\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 466\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 260 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 261 then\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 466\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 44 then\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 466\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 42 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 80 then\n\tbegin begin p = (( @te))-1; end\n\n if @cond.active?\n emit(:kDO_COND, 'do'.freeze, @te - 2, @te)\n else\n emit(:kDO, 'do'.freeze, @te - 2, @te)\n end\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 81 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 466\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 271 then\n# line 1636 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_do(true)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 264 then\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 265 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 266 then\n# line 1639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 269 then\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 275 then\n# line 1666 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 274 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
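A hedged sketch of the test-side idiom this enables (method name and token shape assumed from the surrounding gem, not guaranteed):\n #\n #   while (tok = lexer.advance) && tok.first\n #     tokens << tok # at EOF, #advance is expected to keep yielding [false, ['$eof', range]]\n #   end\n #\n # 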
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 283 then\n# line 1658 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 277 then\n# line 1660 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 281 then\n# line 1666 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 276 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 88 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 89 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 56 then\n# line 1681 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 314 then\n# line 1698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type = delimiter = tok[0].chr\n p = p - 1; \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 308 then\n# line 1705 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = @source_buffer.slice(@ts).chr, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 54 then\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 327 then\n# line 1768 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1; p = p - 1;\n emit(:tSYMBEG, tok(@ts, @ts + 1), @ts, @ts + 1)\n \tbegin\n\t\t @cs = 325\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 315 then\n# line 1776 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 326 then\n# line 1784 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1, @ts + 2))\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 68 then\n# line 1798 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 331 then\n# line 1825 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 330 then\n# line 1835 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 
1)\n end\n\t\tend\n\twhen 316 then\n# line 1874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 51 then\n# line 1894 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1;\n\n if version?(18)\n ident = tok(@ts, @te - 2)\n\n emit((@source_buffer.slice(@ts) =~ /[A-Z]/) ? :tCONSTANT : :tIDENTIFIER,\n ident, @ts, @te - 2)\n p = p - 1; # continue as a symbol\n\n if !@static_env.nil? && @static_env.declared?(ident)\n @cs = 767;\n else\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n @cs = 752;\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 48 then\n# line 1936 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tIDENTIFIER, ident_tok, ident_ts, ident_te)\n p = ident_te - 1\n\n if !@static_env.nil? && @static_env.declared?(ident_tok) && @version < 25\n @cs = 438;\n else\n @cs = 497;\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 302 then\n# line 1955 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 181\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 55 then\n# line 1971 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 286 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 313 then\n# line 1681 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 312 then\n# line 1688 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tSTAR, '*'.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 309 then\n# line 1712 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 307 then\n# line 1718 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 317 then\n# line 1798 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 329 then\n# line 1835 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 335 then\n# line 1841 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 310 then\n# line 1874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 289 then\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 299 then\n# line 1952 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 301 then\n# line 1955 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 181\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 304 then\n# line 1971 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 53 then\n# line 1718 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 71 then\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 47 then\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 52 then\n# line 1952 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 67 then\n# line 1971 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 50 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 113 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 114 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 511; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 115 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 117 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 118 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 365 then\n# line 1991 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 366 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 367 then\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 371 then\n# line 1991 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 74 then\n# line 2001 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 375 then\n# line 2006 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n \tbegin\n\t\t @cs = (push_literal(tok, tok, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 374 then\n# line 2016 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 373 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 377 then\n# line 2010 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 376 then\n# line 2016 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 73 then\n# line 2016 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 535\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 411 then\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tLAMBDA, '->'.freeze, @ts, @ts + 2)\n\n @lambda_stack.push @paren_nest\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 83 then\n# line 2064 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 385 then\n# line 2199 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts, nil, false, false, true))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 76 then\n# line 2217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 407 then\n# line 2224 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 445; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 459 then\n# line 2248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION)\n \tbegin\n\t\t @cs = 759\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 400 then\n# line 2257 
\"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION);\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 392 then\n# line 2267 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 767;\n else\n @cs = 503;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 405 then\n# line 2292 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tOP_ASGN, tok(@ts, @te - 1))\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 390 then\n# line 2296 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tEH, '?'.freeze)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 387 then\n# line 2304 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 389 then\n# line 2317 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tSEMI, ';'.freeze)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 464 then\n# line 2320 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :error, :bare_backslash, nil, range(@ts, @ts + 1)\n p = p - 1;\n end\n\t\tend\n\twhen 384 then\n# line 2326 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 383 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 474 then\n# line 2060 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 325; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 472 then\n# line 2064 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 471 then\n# line 2075 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 414 then\n# line 2149 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 461 then\n# line 2209 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 404 then\n# line 2217 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 320\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 412 then\n# line 2224 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 445; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 466 then\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 410 then\n# line 2248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n \tbegin\n\t\t @cs = 759\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 406 then\n# line 2257 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION);\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 399 then\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 413 then\n# line 2304 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 397 then\n# line 2311 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 403 then\n# line 2326 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 81 then\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 77 then\n# line 2149 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 80 then\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 75 then\n# line 2326 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 78 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 135 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 136 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 325; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 137 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 138 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 139 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 140 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 511; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 141 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 142 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 143 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 144 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? 
'_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 146 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 148 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 150 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 154 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 155 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 157 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(PUNCTUATION);\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 158 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 87 then\n# line 2338 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 479 then\n# line 2341 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 181; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 480 then\n# line 2341 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 181; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 84 then\n# line 2341 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 181; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 483 then\n# line 2351 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = (@cs_before_block_comment)\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 482 then\n# line 2359 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :embedded_document, nil,\n range(@eq_begin_s, @eq_begin_s + '=begin'.length)\n end\n\t\tend\n\twhen 98 then\n# line 2369 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 948\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 2 then\n# line 2373 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 90 then\n# line 2376 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 759\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 91 then\n# line 517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 92 then\n# line 2366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 97 then\n# line 2369 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 948\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 96 then\n# line 2376 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 759\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 1 then\n# line 2376 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 759\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 66 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 102 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
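A tiny illustration (plain Ruby, not generated text):\n #\n #   s = \"a\n #   b\"\n #   s # => \"a\\nb\", the unescaped newline is ordinary string content\n #\n # 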
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 131 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 158 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 164 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
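# (Editorial aside, illustrative only.) The words? branch above is what\n # makes whitespace a separator inside word-array literals such as\n #\n # %w[foo bar]\n #\n # where the space between foo and bar goes to extend_space instead of\n # being appended to the buffered string, so the literal produces two\n # separate word tokens rather than the single string \"foo bar\".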
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 170 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 176 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 179 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
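# (Editorial note.) The @newline_s recorded by the c_nl action above is\n # what lets the lexer emit a tNL token whose source range covers exactly\n # the newline character: for \"a\\nb\" the tNL between the two identifiers\n # points at the position of the \\n itself rather than at wherever the\n # state machine happened to stop.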
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 186 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
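# (Editorial note, illustrative only.) For <<~ heredocs the branch above\n # calls infer_indent_level on every body line that does not close the\n # literal; the smallest indentation seen this way is what the\n # Lexer::Dedenter later strips from each line, so a body whose lines are\n # indented by 4 and 6 spaces comes out dedented by 4.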
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 257 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1482 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 248 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1541 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 240 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 332 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1825 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 303 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1955 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n @cs_before_block_comment = @cs\n \tbegin\n\t\t @cs = 181\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 484 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2351 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = (@cs_before_block_comment)\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 481 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into 
c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2356 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 99 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2369 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 948\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 3 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2373 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 435 then\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 433 then\n# line 626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, chars)) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 438 then\n# line 627 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 436 then\n# line 628 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 2); p -= 2 } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 434 then\n# line 629 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 2); p -= 2 } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 437 then\n# line 630 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars, @ts, @te - 6); p -= 6 } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 426 then\n# line 634 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Float(chars))) } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 427 then\n# line 635 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars), @ts, @te - 2); p -= 2 } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 428 then\n# line 639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 430 then\n# line 640 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 429 then\n# line 641 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars), @ts, @te - 6); p -= 6 } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 125 then\n# line 656 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || 
codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
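# (Editorial sketch, not part of the generated machine.) The \\u{...}\n # handler above accepts several space- or tab-separated codepoints, so\n # \"\\u{41 42}\" builds the escape value \"AB\" one chr(Encoding::UTF_8)\n # at a time; any codepoint at or above 0x110000 (the end of the Unicode\n # range) is rejected with :unicode_point_too_large, and when emulating\n # rubies before 2.4 leading, trailing or doubled spaces inside the\n # braces are fatal :invalid_unicode_escape errors.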
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 152 then\n# line 656 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
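# (Editorial aside, illustrative only.) The munge_escape? branch above is\n # why \"a\\\"b\" yields the plain content a\"b (the backslash before a\n # string delimiter is dropped), while %r(a\\)b) keeps \\) in the regexp\n # source: ) is both the closing delimiter and a regexp metacharacter,\n # so it must stay escaped for the resulting pattern to be valid.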
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 353 then\n# line 656 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n if @version < 24\n if codepoints.start_with?(\" \") || codepoints.start_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s + 2, @escape_s + 3)\n end\n\n if spaces_p = codepoints.index(/[ \\t]{2}/)\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(codepoint_s + spaces_p + 1, codepoint_s + spaces_p + 2)\n end\n\n if codepoints.end_with?(\" \") || codepoints.end_with?(\"\\t\")\n diagnostic :fatal, :invalid_unicode_escape, nil, range(p - 1, p)\n end\n end\n\n codepoints.scan(/([0-9a-fA-F]+)|([ \\t]+)/).each do |(codepoint_str, spaces)|\n if spaces\n codepoint_s += spaces.length\n else\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length\n end\n end\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 109 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 136 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
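# (Editorial sketch.) The character-literal action above (the tCHARACTER /\n # tINTEGER branch) is what makes ?a lex as tCHARACTER(\"a\") on modern\n # rubies but as tINTEGER(97) in 1.8 mode; escaped forms such as ?\\n take\n # the same path with @escape already resolved to the actual newline\n # character by the ESCAPES table lookup above it.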
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 337 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 112 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 139 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 340 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 114 then\n# line 722 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 141 then\n# line 722 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 342 then\n# line 722 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 111 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
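# (Editorial note, illustrative only.) The \"\\x7f\" assignment above is\n # the special case for the control escapes \\c? and \\C-?, which denote\n # DEL: inside a string literal, \\C-? therefore contributes the single\n # byte 0x7f instead of going through the generic control-character\n # arithmetic.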
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 138 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 339 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 128 then\n# line 733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 155 then\n# line 733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 356 then\n# line 733 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 127 then\n# line 737 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 154 then\n# line 737 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 355 then\n# line 737 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 122 then\n# line 743 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 149 then\n# line 743 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 350 then\n# line 743 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 121 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 148 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? 
&& escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 349 then\n# line 747 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 126 then\n# line 753 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 153 then\n# line 753 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 354 then\n# line 753 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 123 then\n# line 767 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 150 then\n# line 767 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 351 then\n# line 767 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 110 then\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. 
The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 137 then\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 338 then\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 161 then\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 166 then\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 57 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 30 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1496 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 32 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1512 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 34 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1540 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 201 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every 
heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1371 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 220 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1389 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 228 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1421 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 33 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 259 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1557 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 251 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1563 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 270 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 282 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1660 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 278 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1663 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 58 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1681 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n 
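# --- Editorial sketch (comment only, not emitted by Ragel): what the
# "# line 839" action shared by the arms above and below is doing. Heredoc
# bodies are lexed ahead of the rest of the line that opens them, so for
# input such as
#
#   puts(<<-A, <<-B)   # tokens after the openers
#   body of A
#   A
#   body of B
#   B
#
# once the newline ending the opener line is handled, scanning must skip the
# already-consumed bodies. @herebody_s caches that resume position; each arm
# restores `p = @herebody_s` exactly once, then clears it so ordinary
# scanning continues from the first token after all heredocs.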
end\n\t\tend\n\twhen 49 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1936 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tIDENTIFIER, ident_tok, ident_ts, ident_te)\n p = ident_te - 1\n\n if !@static_env.nil? && @static_env.declared?(ident_tok) && @version < 25\n @cs = 438;\n else\n @cs = 497;\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 300 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1952 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 372 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 368 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1982 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @in_kwarg\n p = p - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n \tbegin\n\t\t @cs = 181\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 381 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2010 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 378 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2013 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 181\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 465 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2311 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 398 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2314 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 945\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 93 then\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 250 then\n# line 
1027 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1465 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze, @te - 1, @te)\n else\n emit(:tLCURLY, '{'.freeze, @te - 1, @te)\n end\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 272 then\n# line 1027 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE_ARG, '{'.freeze)\n end\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 364 then\n# line 1027 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1852 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE, '{'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 477 then\n# line 1027 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 2035 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 478 then\n# line 1036 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n current_literal = literal\n if current_literal\n if current_literal.end_interp_brace_and_try_closing\n if version?(18, 19)\n emit(:tRCURLY, '}'.freeze, p - 1, p)\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n else\n emit(:tSTRING_DEND, '}'.freeze, p - 1, p)\n end\n\n if current_literal.saved_herebody_s\n @herebody_s = current_literal.saved_herebody_s\n end\n\n\n p = p - 1;\n @cs = (next_state_for_literal(current_literal));\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n \t\tend\n# line 2267 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 767;\n else\n @cs = 503;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 60 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 64 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 204 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1371 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 219 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1389 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 231 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1421 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 253 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1560 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 268 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 280 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1660 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 306 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1952 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 370 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 380 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2010 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 402 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2311 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 95 then\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 225 then\n# line 1227 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1411 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 318 then\n# line 1227 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 291 then\n# line 1227 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 395 then\n# line 1227 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 135 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 136 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 325; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 137 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 138 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 139 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 140 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 511; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 141 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 142 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 143 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 144 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 146 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 148 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 150 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 154 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 155 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 157 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(PUNCTUATION);\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 158 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 226 then\n# line 1228 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1411 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 319 then\n# line 1228 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 293 then\n# line 1228 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 396 then\n# line 1228 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2231 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 466; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 320 then\n# line 1233 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 294 then\n# line 1233 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 321 then\n# line 1234 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 295 then\n# line 1234 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 325 then\n# line 1235 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 298 then\n# line 1235 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 324 then\n# line 1236 
\"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 297 then\n# line 1236 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 113 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 114 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 511; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 115 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 117 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 118 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 438; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 322 then\n# line 1237 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 296 then\n# line 1237 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 323 then\n# line 1242 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1790 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 460 then\n# line 1247 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2213 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT, tok(@ts, tm), @ts, tm)\n p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 249 then\n# line 1253 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 1459 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze, @te - 1, @te)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 357 then\n# line 1253 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 1864 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 463 then\n# line 1253 
\"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 2300 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK2, '['.freeze)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 241 then\n# line 1260 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1440 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if version?(18)\n emit(:tLPAREN2, '('.freeze, @te - 1, @te)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 254 then\n# line 1260 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1453 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN2, '('.freeze)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 262 then\n# line 1260 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1579 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n if version?(18)\n @cs = 759; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 311 then\n# line 1260 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1869 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN, '('.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 408 then\n# line 1260 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 409 then\n# line 1266 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2267 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n\n if @version < 24\n @cond.lexpop\n @cmdarg.lexpop\n else\n @cond.pop\n @cmdarg.pop\n end\n\n if tok == '}'.freeze || tok == ']'.freeze\n if @version >= 25\n @cs = 767;\n else\n @cs = 503;\n end\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 70 then\n# line 1730 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 328 then\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n new_herebody_s = p \t\tend\n# line 1732 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n tok(@ts, heredoc_e) =~ /^<<(-?)(~?)([\"'`]?)(.*)\\3$/m\n\n indent = !$1.empty? || !$2.empty?\n dedent_body = !$2.empty?\n type = $3.empty? ? 
'<<\"'.freeze : ('<<'.freeze + $3)\n delimiter = $4\n\n if @version >= 24\n if delimiter.count(\"\\n\") > 0\n if delimiter.end_with?(\"\\n\")\n diagnostic :warning, :heredoc_id_ends_with_nl, nil, range(@ts, @ts + 1)\n delimiter = delimiter.rstrip\n else\n diagnostic :fatal, :heredoc_id_has_newline, nil, range(@ts, @ts + 1)\n end\n end\n end\n\n if dedent_body && version?(18, 19, 20, 21, 22)\n emit(:tLSHFT, '<<'.freeze, @ts, @ts + 2)\n p = @ts + 1\n @cs = 535; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (push_literal(type, delimiter, @ts, heredoc_e, indent, dedent_body));\n\n @herebody_s ||= new_herebody_s\n p = @herebody_s - 1\n end\n end\n\t\tend\n\twhen 333 then\n# line 1810 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 360 then\n# line 1879 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1880 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 511; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 450 then\n# line 2115 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 444 then\n# line 2116 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 447 then\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 441 then\n# line 2118 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 456 then\n# line 2119 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 419 then\n# line 2120 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 457 then\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 86 then\n# line 2337 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2338 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 8 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 423 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2179 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 210 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1330 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act 
= 39;\t\tend\n\twhen 197 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1334 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 40;\t\tend\n\twhen 193 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1338 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 41;\t\tend\n\twhen 26 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1500 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 67;\t\tend\n\twhen 243 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1513 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 68;\t\tend\n\twhen 27 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 236 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1557 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 74;\t\tend\n\twhen 263 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1589 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 80;\t\tend\n\twhen 45 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 81;\t\tend\n\twhen 284 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1654 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 88;\t\tend\n\twhen 273 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1658 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 89;\t\tend\n\twhen 287 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1874 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 113;\t\tend\n\twhen 359 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1880 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 114;\t\tend\n\twhen 358 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1886 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 115;\t\tend\n\twhen 72 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 117;\t\tend\n\twhen 285 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 118;\t\tend\n\twhen 288 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1971 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 122;\t\tend\n\twhen 473 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2035 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 135;\t\tend\n\twhen 468 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2060 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 136;\t\tend\n\twhen 476 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2070 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 138;\t\tend\n\twhen 469 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2075 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 139;\t\tend\n\twhen 470 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2079 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 140;\t\tend\n\twhen 475 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2083 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 141;\t\tend\n\twhen 467 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2094 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 142;\t\tend\n\twhen 462 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2108 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 143;\t\tend\n\twhen 388 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 144;\t\tend\n\twhen 421 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2166 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 
147;\t\tend\n\twhen 79 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 148;\t\tend\n\twhen 391 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2209 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 150;\t\tend\n\twhen 382 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 154;\t\tend\n\twhen 394 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2231 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 155;\t\tend\n\twhen 386 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2257 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 157;\t\tend\n\twhen 393 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 158;\t\tend\n\twhen 167 then\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 927 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 115 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 142 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 343 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 120 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 147 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 348 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 119 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 146 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 347 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 117 then\n# line 722 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 144 then\n# line 722 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 345 then\n# line 722 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 113 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 140 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 341 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 124 then\n# line 753 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 767 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 151 then\n# line 753 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 767 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 352 then\n# line 753 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 767 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 107 then\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 135 then\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 336 then\n# line 799 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 793 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 61 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 203 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1371 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 218 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1389 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 230 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1421 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 252 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1560 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 767\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 267 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1639 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 279 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1660 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 305 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1952 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 369 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1979 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 379 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2010 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 401 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2311 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 94 then\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 452 then\n# line 2119 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 416 then\n# line 2120 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 431 then\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 424 then\n# line 2178 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 633 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 420 then\n# line 2179 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 633 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2181 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 258 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 491 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1557 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 74;\t\tend\n\twhen 35 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n 
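# Hedged recap (editor's illustration only) of what the integer action
# above enforces, stated as surface Ruby behavior:
0b101           # => 5   (@num_base == 2)
0o17            # => 15  (@num_base == 8)
0x1f            # => 31  (@num_base == 16)
"101".to_i(2)   # => 5   -- the digits.to_i(@num_base) conversion
# A trailing underscore (10_) triggers :trailing_in_number, and an 8 or 9
# inside an octal literal (0o8) triggers :invalid_octal, as diagnosed above.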
\t\tend\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 46 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1602 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 81;\t\tend\n\twhen 65 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1681 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 94;\t\tend\n\twhen 82 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2064 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 137;\t\tend\n\twhen 37 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1512 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1513 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 68;\t\tend\n\twhen 362 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1879 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1925 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 117;\t\tend\n\twhen 361 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1879 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1271 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 118;\t\tend\n\twhen 453 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2119 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 144;\t\tend\n\twhen 118 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? 
&& escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 145 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
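# Runnable versions (editor's illustration) of the two continuation
# behaviors the comments above describe:
"a\
b"          # => "ab" -- the \<newline> pair is stripped, as in the gsub
<<~HERE
  1\
  2
HERE
# => "12\n" per the comment above: the Dedenter later strips the indent
# and resolves the \<newline> continuation by hand.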
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 346 then\n# line 696 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 116 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 143 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 872 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.squiggly_heredoc? && escaped_char == \"\\n\".freeze\n # Squiggly heredocs like\n # <<~-HERE\n # 1\\\n # 2\n # HERE\n # treat '\\' as a line continuation, but still dedent the body, so the heredoc above becomes \"12\\n\".\n # This information is emitted as is, without escaping,\n # later this escape sequence (\\\\\\n) gets handled manually in the Lexer::Dedenter\n current_literal.extend_string(tok, @ts, @te)\n elsif current_literal.supports_line_continuation_via_slash? && escaped_char == \"\\n\".freeze\n # Heredocs, regexp and a few other types of literals support line\n # continuation via \\\\\\n sequence. The code like\n # \"a\\\n # b\"\n # must be parsed as \"ab\"\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n elsif current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
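# Hedged sketch of the \C- / \M- bit arithmetic used in the actions above:
("a".ord & 0x9f).chr   # => "\x01" -- the value of ?\C-a
("a".ord | 0x80).chr   # => "\xE1" -- the value of ?\M-a (a binary string)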
On the other hand, escaped newlines are removed (in cases like \"\\\\C-\\\\\\n\\\\M-x\")\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 344 then\n# line 723 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 707 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 711 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value.getbyte(0))\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 767; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 448 then\n# line 2115 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 442 then\n# line 2116 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 445 then\n# line 2117 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 439 then\n# line 2118 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 451 then\n# line 2119 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 415 then\n# line 2120 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 31 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 839 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1512 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1513 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 68;\t\tend\n\twhen 62 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1183 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1186 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 1681 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 94;\t\tend\n\twhen 458 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2154 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 146;\t\tend\n\twhen 454 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2119 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2154 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 146;\t\tend\n\twhen 418 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2120 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 624 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2154 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 146;\t\tend\n# line 22832 \"lib/parser/lexer.rb\"\n\tend\n\tend\n\tend\n\tif _goto_level <= _again\n\tcase _lex_to_state_actions[ @cs] \n\twhen 88 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = nil;\t\tend\n# line 22842 \"lib/parser/lexer.rb\"\n\tend\n\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tp += 1\n\tif p != pe\n\t\t_goto_level = _resume\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _test_eof\n\tif p == eof\n\tif _lex_eof_trans[ @cs] > 0\n\t\t_trans = _lex_eof_trans[ @cs] - 1;\n\t\t_goto_level = _eof_trans\n\t\tnext;\n\tend\n\tend\n\n\tend\n\tif _goto_level <= _out\n\t\tbreak\n\tend\nend\n\tend\n\n# line 283 \"lib/parser/lexer.rl\"\n # %\n\n @p = p\n\n if @token_queue.any?\n @token_queue.shift\n elsif @cs == klass.lex_error\n [ false, [ '$error'.freeze, range(p - 1, p) ] ]\n else\n eof = @source_pts.size\n [ false, [ '$eof'.freeze, range(eof, eof) ] ]\n end\n end", "def advance\n if @token_queue.any?\n return @token_queue.shift\n end\n\n # Ugly, but dependent on Ragel output. 
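# Hedged usage sketch (editor's aside; `lexer` is a hypothetical
# Parser::Lexer instance): each call to advance yields [type, [value,
# range]], and the type becomes false at the [false, ['$eof', ...]]
# return shown above.
loop do
  type, (value, range) = lexer.advance
  break unless type
  puts "#{type} #{value.inspect} #{range}"
end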
Consider refactoring it somehow.\n klass = self.class\n _lex_trans_keys = klass.send :_lex_trans_keys\n _lex_key_spans = klass.send :_lex_key_spans\n _lex_index_offsets = klass.send :_lex_index_offsets\n _lex_indicies = klass.send :_lex_indicies\n _lex_trans_targs = klass.send :_lex_trans_targs\n _lex_trans_actions = klass.send :_lex_trans_actions\n _lex_to_state_actions = klass.send :_lex_to_state_actions\n _lex_from_state_actions = klass.send :_lex_from_state_actions\n _lex_eof_trans = klass.send :_lex_eof_trans\n\n pe = @source_pts.size + 2\n p, eof = @p, pe\n\n @command_state = (@cs == klass.lex_en_expr_value ||\n @cs == klass.lex_en_line_begin)\n\n \n# line 11015 \"lib/parser/lexer.rb\"\nbegin\n\ttestEof = false\n\t_slen, _trans, _keys, _inds, _acts, _nacts = nil\n\t_goto_level = 0\n\t_resume = 10\n\t_eof_trans = 15\n\t_again = 20\n\t_test_eof = 30\n\t_out = 40\n\twhile true\n\tif _goto_level <= 0\n\tif p == pe\n\t\t_goto_level = _test_eof\n\t\tnext\n\tend\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _resume\n\tcase _lex_from_state_actions[ @cs] \n\twhen 84 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = p\n\t\tend\n# line 11043 \"lib/parser/lexer.rb\"\n\tend\n\t_keys = @cs << 1\n\t_inds = _lex_index_offsets[ @cs]\n\t_slen = _lex_key_spans[ @cs]\n\t_wide = ( (@source_pts[p] || 0))\n\t_trans = if ( _slen > 0 && \n\t\t\t_lex_trans_keys[_keys] <= _wide && \n\t\t\t_wide <= _lex_trans_keys[_keys + 1] \n\t\t ) then\n\t\t\t_lex_indicies[ _inds + _wide - _lex_trans_keys[_keys] ] \n\t\t else \n\t\t\t_lex_indicies[ _inds + _slen ]\n\t\t end\n\tend\n\tif _goto_level <= _eof_trans\n\t @cs = _lex_trans_targs[_trans]\n\tif _lex_trans_actions[_trans] != 0\n\tcase _lex_trans_actions[_trans]\n\twhen 28 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 103 then\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 29 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n\twhen 56 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n\twhen 60 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n\twhen 283 then\n# line 1172 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 36 then\n# line 1441 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 38 then\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 40 then\n# line 1485 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 66 then\n# line 1675 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n\twhen 323 then\n# line 1729 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n\twhen 352 then\n# line 1802 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 428 then\n# line 2020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n\twhen 422 then\n# line 2021 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n\twhen 425 then\n# line 2022 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n\twhen 419 then\n# line 2023 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n\twhen 434 then\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n\twhen 402 then\n# line 2025 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n\twhen 414 then\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 409 then\n# line 2083 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 407 then\n# line 2084 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 80 then\n# line 2219 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 7 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 100 then\n# line 1013 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 5 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 96 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
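# Hedged illustration of the two interpolation openers handled above:
x, @y = 1, 2
"a#{x}b"   # => "a1b" -- '#{' opens full interpolation (tSTRING_DBEG)
"a#@y"     # => "a2"  -- '#@ivar' is the short form (tSTRING_DVAR)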
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 95 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 101 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 98 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 99 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 6 then\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 4 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 127 then\n# line 1013 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 10 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 124 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
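# The heredoc-intertwined-with-a-literal case quoted in the surrounding
# comments, runnable as-is (editor's illustration):
p <<-foo+"1
bar
foo
2"
# prints "bar\n1\n2" -- lexing of the "1...2" literal resumes from
# @herebody_s once the heredoc body has been consumed.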
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 123 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 128 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 126 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 11 then\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 9 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 150 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
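# Hedged example of the tLABEL_END lookahead handled above (Ruby >= 2.2):
h = { "quoted key": 1 }   # the closing '":' lexes as tLABEL_END
h[:"quoted key"]          # => 1 -- the quoted label becomes a symbol key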
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 149 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 152 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 153 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 156 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
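# Hedged sketch of the words?/extend_space branch above: in word-array
# literals, whitespace (newlines included) separates elements instead of
# extending the string content.
%w[a b
   c]   # => ["a", "b", "c"]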
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 155 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 158 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 165 then\n# line 1013 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 13 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 162 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
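# Hedged illustration of the /\r+$/ cleanup in the heredoc action above:
# a CRLF-terminated line still matches its terminator once trailing
# carriage returns are stripped.
"HERE\r".gsub(/\r+$/, "")   # => "HERE"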
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 161 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 166 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 164 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 12 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 168 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 167 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 175 then\n# line 1013 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DBEG, '#{'.freeze)\n\n if current_literal.heredoc?\n current_literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n current_literal.start_interp_brace\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 15 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 171 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 170 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? 
&&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 176 then\n# line 958 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n current_literal.flush_string\n current_literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 173 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 174 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 14 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 178 then\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. 
Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 177 then\n# line 821 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = tok\n\n # tLABEL_END is only possible in non-cond context on >= 2.2\n if @version >= 22 && [email protected]?\n lookahead = @source_buffer.slice(@te...@te+2)\n end\n\n current_literal = literal\n if !current_literal.heredoc? &&\n (token = current_literal.nest_and_try_closing(string, @ts, @te, lookahead))\n if token[0] == :tLABEL_END\n p += 1\n pop_literal\n @cs = 758;\n else\n @cs = (pop_literal);\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n current_literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 180 then\n# line 945 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 181 then\n# line 1103 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tREGEXP_OPT, tok(@ts, @te - 1), @ts, @te - 1)\n p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 182 then\n# line 1091 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n unknown_options = tok.scan(/[^imxouesn]/)\n if unknown_options.any?\n diagnostic :error, :regexp_options,\n { :options => unknown_options.join }\n end\n\n emit(:tREGEXP_OPT)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 16 then\n# line 1231 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 183 then\n# line 1231 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 185 then\n# line 1244 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@@[0-9]/\n diagnostic :error, :cvar_name, { :name => tok }\n end\n\n emit(:tCVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 184 then\n# line 1254 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@[0-9]/\n diagnostic :error, :ivar_name, { :name => tok }\n end\n\n emit(:tIVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 206 then\n# line 1275 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(KEYWORDS_BEGIN);\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 192 then\n# line 1283 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tIDENTIFIER)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level 
= _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 18 then\n# line 1287 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n @cs = 773; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 189 then\n# line 1296 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 201 then\n# line 1300 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 20 then\n# line 1306 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if version?(23)\n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 188 then\n# line 1319 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 187 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 205 then\n# line 1275 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS_BEGIN);\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 202 then\n# line 1279 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 204 then\n# line 1283 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 199 then\n# line 1287 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n @cs = 773; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 195 then\n# line 1296 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 200 then\n# line 1303 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 193 then\n# line 1316 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 198 then\n# line 1319 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 19 then\n# line 1296 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit_table(PUNCTUATION)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 17 then\n# line 1319 
\"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 191 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 39 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN);\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 40 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 41 then\n\tbegin begin p = (( @te))-1; end\n emit(:tIDENTIFIER)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 22 then\n# line 1331 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n p = p - 1; @cs = 758; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 208 then\n# line 1337 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 207 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 210 then\n# line 1334 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 209 then\n# line 1337 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 21 then\n# line 1337 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 216 then\n# line 1363 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 215 then\n# line 1369 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 214 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 226 then\n# line 1348 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 217 then\n# line 1352 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 222 then\n# line 1363 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 220 then\n# line 1366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 225 then\n# line 1369 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 249 then\n# line 1427 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 232 then\n# line 1445 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 238 then\n# line 1469 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 24 then\n# line 1477 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 240 then\n# line 1486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 39 then\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 227 then\n# line 1511 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 228 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 239 then\n# line 1436 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 235 then\n# line 1458 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 237 then\n# line 1474 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 231 then\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 230 then\n# line 1502 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 248 then\n# line 1511 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 25 then\n# line 1502 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 41 then\n# line 1511 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 23 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 67 then\n\tbegin begin p = (( @te))-1; end\n\n if tok(tm, tm + 1) == '/'.freeze\n # Ambiguous regexp literal.\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 68 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 73 then\n\tbegin begin p = (( @te))-1; end\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\telse\n\tbegin begin p = (( @te))-1; end\nend\nend \n\t\t\tend\n\twhen 43 then\n# line 1547 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 253 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 254 then\n# line 1547 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 44 then\n# line 1547 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 42 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 80 then\n\tbegin begin p = (( @te))-1; end\n\n if @cond.active?\n emit(:kDO_COND, 'do'.freeze, @te - 2, @te)\n else\n emit(:kDO, 'do'.freeze, @te - 2, @te)\n end\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 81 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 474\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 264 then\n# line 1581 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_do(true)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 257 then\n# line 1587 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 258 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 259 then\n# line 1584 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 262 then\n# line 1587 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 268 then\n# line 1611 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 267 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 276 then\n# line 1603 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 270 then\n# line 1605 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 274 then\n# line 1611 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 269 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 88 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 89 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 53 then\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 305 then\n# line 1643 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type = delimiter = tok[0].chr\n p = p - 1; \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 299 then\n# line 1650 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = @source_buffer.slice(@ts).chr, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 51 then\n# line 1657 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 306 then\n# line 1703 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 65 then\n# line 1717 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 320 then\n# line 1748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 319 then\n# line 1758 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 307 then\n# line 1797 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 48 then\n# line 1817 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1;\n\n if version?(18)\n ident = tok(@ts, @te - 2)\n\n emit((@source_buffer.slice(@ts) =~ /[A-Z]/) ? 
:tCONSTANT : :tIDENTIFIER,\n ident, @ts, @te - 2)\n p = p - 1; # continue as a symbol\n\n if !@static_env.nil? && @static_env.declared?(ident)\n @cs = 773;\n else\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n @cs = 758;\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 293 then\n# line 1863 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 52 then\n# line 1876 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 279 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 304 then\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 303 then\n# line 1633 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tSTAR, '*'.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 300 then\n# line 1657 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 298 then\n# line 1663 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 308 then\n# line 1717 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 318 then\n# line 1758 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 324 then\n# line 1764 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 301 then\n# line 1797 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 346 then\n# line 1216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 290 then\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 292 then\n# line 1863 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 295 then\n# line 1876 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 50 then\n# line 1663 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 68 then\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 49 then\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 64 then\n# line 1876 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 47 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 111 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 112 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 113 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 115 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 116 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 119 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 354 then\n# line 1896 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 355 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 356 then\n# line 1884 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 360 then\n# line 1896 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 71 then\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 364 then\n# line 1911 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n \tbegin\n\t\t @cs = (push_literal(tok, tok, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 363 then\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 362 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 366 then\n# line 1915 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 365 then\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 70 then\n# line 1921 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 543\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 396 then\n# line 1932 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tLAMBDA, '->'.freeze, @ts, @ts + 2)\n\n @lambda_stack.push @paren_nest\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 78 then\n# line 1969 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 374 then\n# line 2104 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts, nil, false, false, true))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 73 then\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 393 then\n# line 2129 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 453; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 387 then\n# line 2156 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 380 then\n# line 2160 
\"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION)\n @cond.lexpop; @cmdarg.lexpop\n\n if RBRACE_OR_RBRACK.include?(tok)\n @cs = 511;\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 392 then\n# line 2174 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tOP_ASGN, tok(@ts, @te - 1))\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 378 then\n# line 2178 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tEH, '?'.freeze)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 375 then\n# line 2186 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 377 then\n# line 2199 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tSEMI, ';'.freeze)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 442 then\n# line 2202 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :error, :bare_backslash, nil, range(@ts, @ts + 1)\n p = p - 1;\n end\n\t\tend\n\twhen 373 then\n# line 2208 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 372 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 452 then\n# line 1965 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 333; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 450 then\n# line 1969 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 449 then\n# line 1980 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 399 then\n# line 2054 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 439 then\n# line 2114 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 391 then\n# line 2122 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 328\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 397 then\n# line 2129 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 453; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 444 then\n# line 1216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = 
p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 386 then\n# line 2156 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 398 then\n# line 2186 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 384 then\n# line 2193 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 390 then\n# line 2208 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 74 then\n# line 2054 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 72 then\n# line 2208 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 75 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 132 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 133 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 333; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 134 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 135 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 136 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 137 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 138 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 139 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 140 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 141 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? 
'_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 143 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 144 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 145 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 151 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 152 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\nend \n\t\t\tend\n\twhen 82 then\n# line 2220 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 457 then\n# line 2223 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 171; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 458 then\n# line 2223 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 171; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 79 then\n# line 2223 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 171; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 461 then\n# line 2233 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 460 then\n# line 2241 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :embedded_document, nil,\n range(@eq_begin_s, @eq_begin_s + '=begin'.length)\n end\n\t\tend\n\twhen 93 then\n# line 2251 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 949\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 2 then\n# line 2255 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 85 then\n# line 2258 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 86 then\n# line 518 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 87 then\n# line 2248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 92 then\n# line 2251 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 949\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 91 then\n# line 2258 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 1 then\n# line 2258 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 765\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 63 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n\twhen 97 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 125 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 151 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 157 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 163 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 169 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 172 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 179 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n\twhen 250 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1427 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Unlike expr_beg as invoked in the next rule, do not warn\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 241 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 233 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 321 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1748 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[@source_buffer.slice(@ts + 1)]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 294 then\n# line 492 
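# ---------------------------------------------------------------------------
# Each copy of the "line 883" action above runs when a newline is scanned
# inside a literal: for a heredoc it takes the most recently scanned line
# (from @herebody_s to @ts), strips trailing \r, and asks the literal to
# close on it; for <<~ heredocs the indentation level is inferred instead.
# A minimal sketch of the closing test, assuming illustrative names
# `delimiter` and `indented` (not the gem's Literal#nest_and_try_closing API):
def heredoc_line_closes?(line, delimiter, indented: false)
  candidate = line.sub(/\r*\n?\z/, '')        # mirrors the gsub(/\r+$/, '')
  candidate = candidate.lstrip if indented    # <<-ID and <<~ID allow indent
  candidate == delimiter
end

heredoc_line_closes?("EOS\n",   "EOS")                  # => true
heredoc_line_closes?("  EOS\n", "EOS")                  # => false (plain <<EOS)
heredoc_line_closes?("  EOS\n", "EOS", indented: true)  # => true  (<<-EOS / <<~EOS)
# ---------------------------------------------------------------------------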
\"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1863 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 462 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2233 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 459 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2238 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 94 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2251 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 949\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 3 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2255 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 416 then\n# line 626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 415 then\n# line 627 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, chars)) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? 
'_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 417 then\n# line 628 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 410 then\n# line 632 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Float(chars))) } \t\tend\n# line 2086 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 411 then\n# line 636 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tRATIONAL, Rational(chars)) } \t\tend\n# line 2086 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 412 then\n# line 637 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tIMAGINARY, Complex(0, Rational(chars))) } \t\tend\n# line 2086 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 120 then\n# line 652 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n 
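# ---------------------------------------------------------------------------
# The @num_xfrm lambdas selected above ("line 626".."line 637") implement the
# Ruby 2.1+ numeric literal suffixes: `r` wraps the digits in Rational, `i`
# in Complex(0, n), and `ri` composes both. A simplified base-10 sketch of
# the mapping (hedged: the generated code also honours @num_base and the
# version?(18, 19, 20) compatibility branch seen above):
def numeric_token(digits, suffix)
  case suffix
  when 'r'  then [:tRATIONAL,  Rational(digits)]
  when 'i'  then [:tIMAGINARY, Complex(0, Integer(digits))]
  when 'ri' then [:tIMAGINARY, Complex(0, Rational(digits))]
  else           [:tINTEGER,   Integer(digits)]
  end
end

numeric_token('42', 'r')   # => [:tRATIONAL, (42/1)]
numeric_token('42', 'ri')  # => [:tIMAGINARY, (0+(42/1)*i)]
# ---------------------------------------------------------------------------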
break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 146 then\n# line 652 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 342 then\n# line 652 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 104 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 130 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
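# ---------------------------------------------------------------------------
# The "line 652" action above decodes \u{...} escapes, which may carry
# several space- or tab-separated codepoints; anything >= 0x110000 is
# rejected with :unicode_point_too_large. The same decoding in plain Ruby
# (illustrative sketch; the diagnostic plumbing is omitted):
def decode_u_braces(body)   # body = the text between \u{ and }
  body.split(/[ \t]+/).map do |cp|
    n = cp.to_i(16)
    raise ArgumentError, 'unicode point too large' if n >= 0x110000
    n.chr(Encoding::UTF_8)
  end.join
end

decode_u_braces('1F600 444')  # => "😀ф"
# ---------------------------------------------------------------------------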
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 326 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 107 then\n# line 679 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 133 then\n# line 679 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 329 then\n# line 679 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_escape\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 109 then\n# line 698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 135 then\n# line 698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 331 then\n# line 698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 106 then\n# line 705 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. 
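# ---------------------------------------------------------------------------
# The "line 698" action above hard-codes @escape = "\x7f": that is the \c? /
# \C-? spelling of DEL, which does not follow the usual control-escape rule
# of masking the character to its low five bits. Plain-Ruby illustration:
("a".ord & 0x1f).chr  # => "\x01", i.e. what ?\C-a denotes
"\c?"                 # => "\x7F", the special-cased DEL
# ---------------------------------------------------------------------------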
So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 132 then\n# line 705 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 328 then\n# line 705 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 122 then\n# line 709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 148 then\n# line 709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 344 then\n# line 709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 117 then\n# line 713 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
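# ---------------------------------------------------------------------------
# The "line 705" and "line 709" actions above compute numeric escapes: octal
# digits are reduced modulo 0x100 (so \400 wraps around to \000) and hex
# digits are parsed with to_i(16). Equivalent plain-Ruby expressions:
('400'.to_i(8) % 0x100).chr  # => "\x00"  (octal \400 wraps)
('377'.to_i(8) % 0x100).chr  # => "\xFF"
'7f'.to_i(16).chr            # => "\x7F"  (hex \x7f)
# ---------------------------------------------------------------------------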
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 143 then\n# line 713 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 339 then\n# line 713 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 121 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 147 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
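# ---------------------------------------------------------------------------
# Every "line 845" body above reduces to one decision: if the backslashed
# character is this literal's own delimiter it is "munged" (written through
# without its escape meaning); otherwise the precomputed @escape replaces the
# token, except in regexps, which keep escapes verbatim for the regexp
# engine. A deliberately simplified sketch (the generated code additionally
# special-cases regexp metacharacters; all names here are illustrative):
def expand_escape(raw, escape, delimiter:, regexp:)
  ch = raw[1]                      # first character after the backslash
  return raw if regexp             # /\n/ keeps both characters '\' and 'n'
  return ch  if ch == delimiter    # "\"" inside "..." becomes just '"'
  escape || raw                    # "\n" inside "..." becomes 0x0A
end

expand_escape('\n', "\n", delimiter: '"', regexp: false)  # => "\n" (newline)
expand_escape('\n', "\n", delimiter: '"', regexp: true)   # => "\\n" verbatim
# ---------------------------------------------------------------------------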
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 343 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_hex_escape, nil, range(@escape_s - 1, p + 2)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 116 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 142 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 338 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 119 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 145 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 341 then\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 105 then\n# line 766 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. 
So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 131 then\n# line 766 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 327 then\n# line 766 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 154 then\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 159 then\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 54 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 30 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1441 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 32 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 34 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1485 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 194 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1316 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 213 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1334 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 221 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 33 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 252 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1502 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 244 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n 
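# (Editorial sketch, not part of the generated machine.) For source\n # like "p <<-A, 42" followed by a body line and the terminator "A",\n # the body is consumed while the first line is being lexed: p jumps\n # into the body right after <<-A is scanned, returns to just past\n # <<-A once "A" closes the literal, and @herebody_s is left pointing\n # just past the terminator line. When the newline ending "p <<-A, 42"\n # is finally reached, this action forwards p to @herebody_s so the\n # already-consumed body is not lexed a second time.\n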
if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1508 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 263 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1584 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 275 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1605 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 271 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1608 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 55 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 291 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 361 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1884 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 357 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1887 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @in_kwarg\n p = p - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 370 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1915 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 367 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1918 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np 
= p - 1; begin \tbegin\n\t\t @cs = 171\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 443 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2193 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 385 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2196 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 946\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 88 then\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 243 then\n# line 983 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1410 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n emit(:tLCURLY, '{'.freeze, @te - 1, @te)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 265 then\n# line 983 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1570 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE_ARG, '{'.freeze)\n end\n @cs = 765;\n end\n\t\tend\n\twhen 353 then\n# line 983 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1775 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG, '{'.freeze)\n else\n emit(:tLBRACE, '{'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 455 then\n# line 983 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n current_literal = literal\n if current_literal\n current_literal.start_interp_brace\n end\n \t\tend\n# line 1940 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 456 then\n# line 992 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n current_literal = literal\n if current_literal\n if current_literal.end_interp_brace_and_try_closing\n if version?(18, 19)\n emit(:tRCURLY, '}'.freeze, p - 1, p)\n else\n emit(:tSTRING_DEND, 
'}'.freeze, p - 1, p)\n end\n\n if current_literal.saved_herebody_s\n @herebody_s = current_literal.saved_herebody_s\n end\n\n p = p - 1;\n @cs = (stack_pop);\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n \t\tend\n# line 2160 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n @cond.lexpop; @cmdarg.lexpop\n\n if RBRACE_OR_RBRACK.include?(tok)\n @cs = 511;\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 57 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 61 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 197 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1316 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 212 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1334 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 224 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 246 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1505 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 261 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1584 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 273 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1605 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 297 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 359 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1884 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 369 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1915 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 389 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2193 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 90 then\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 218 then\n# line 1172 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1356 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 309 then\n# line 1172 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 282 then\n# line 1172 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 382 then\n# line 1172 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 132 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'.freeze\n emit(:tLAMBEG, '{'.freeze)\n else # 'do'\n emit(:kDO_LAMBDA, 'do'.freeze)\n end\n else\n if tok == '{'.freeze\n emit(:tLCURLY, '{'.freeze)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 133 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 333; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 134 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class'.freeze, @ts, @ts + 5)\n emit(:tLSHFT, '<<'.freeze, @te - 2, @te)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 135 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 136 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 137 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 138 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'.freeze\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 139 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n unless !@static_env.nil? && @static_env.declared?(tok)\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:k__ENCODING__, '__ENCODING__'.freeze)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 140 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 141 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 143 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 144 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f, @ts, @te - 1)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 145 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 147 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 151 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 152 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\nend \n\t\t\tend\n\twhen 219 then\n# line 1173 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1356 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 310 then\n# line 1173 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 284 then\n# line 1173 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 383 then\n# line 1173 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2136 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 474; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 311 then\n# line 1178 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 285 then\n# line 1178 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 312 then\n# line 1179 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 286 then\n# line 1179 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 316 then\n# line 1180 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 289 then\n# line 1180 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 315 then\n# line 1181 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 288 then\n# line 1181 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tUNARY_NUM, tok(@ts, @ts + 1), @ts, @ts + 1)\n p = p - 1; @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 111 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 112 then\n\tbegin begin p = (( @te))-1; end\n emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 113 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 115 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 116 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 446; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 119 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 313 then\n# line 1182 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 287 then\n# line 1182 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 314 then\n# line 1187 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 438 then\n# line 1192 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2118 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT, tok(@ts, tm), @ts, tm)\n p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 242 then\n# line 1198 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 1404 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze, @te - 1, @te)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 345 then\n# line 1198 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 1787 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '['.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 441 then\n# line 1198 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 2182 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK2, '['.freeze)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 234 then\n# line 1205 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1385 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if version?(18)\n emit(:tLPAREN2, '('.freeze, @te - 1, @te)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tLPAREN_ARG, '('.freeze, @te - 1, @te)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 247 then\n# line 1205 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1398 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN2, '('.freeze)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 255 then\n# line 1205 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1524 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tLPAREN_ARG, 
'('.freeze, @te - 1, @te)\n if version?(18)\n @cs = 765; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 302 then\n# line 1205 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1792 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN, '('.freeze)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 394 then\n# line 1205 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 2156 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 395 then\n# line 1211 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2160 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n @cond.lexpop; @cmdarg.lexpop\n\n if RBRACE_OR_RBRACK.include?(tok)\n @cs = 511;\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 67 then\n# line 1675 \"lib/parser/lexer.rl\"\n\t\tbegin\n heredoc_e = p \t\tend\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 317 then\n# line 1676 \"lib/parser/lexer.rl\"\n\t\tbegin\n new_herebody_s = p \t\tend\n# line 1677 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n tok(@ts, heredoc_e) =~ /^<<(-?)(~?)([\"'`]?)(.*)\\3$/\n\n indent = !$1.empty? || !$2.empty?\n dedent_body = !$2.empty?\n type = $3.empty? ? 
'<<\"'.freeze : ('<<'.freeze + $3)\n delimiter = $4\n\n if dedent_body && version?(18, 19, 20, 21, 22)\n emit(:tLSHFT, '<<'.freeze, @ts, @ts + 2)\n p = @ts + 1\n @cs = 543; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (push_literal(type, delimiter, @ts, heredoc_e, indent, dedent_body));\n\n @herebody_s ||= new_herebody_s\n p = @herebody_s - 1\n end\n end\n\t\tend\n\twhen 322 then\n# line 1729 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = nil \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 349 then\n# line 1802 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1803 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kRESCUE, 'rescue'.freeze, @ts, tm)\n p = tm - 1\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 429 then\n# line 2020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 423 then\n# line 2021 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 426 then\n# line 2022 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 420 then\n# line 2023 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 435 then\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 404 then\n# line 2025 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 436 then\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 81 then\n# line 2219 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2220 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 8 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 203 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1275 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 39;\t\tend\n\twhen 190 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1279 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 40;\t\tend\n\twhen 186 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1283 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 41;\t\tend\n\twhen 26 then\n# line 1 \"NONE\"\n\t\tbegin\n 
@te = p+1\n\t\tend\n# line 1445 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 67;\t\tend\n\twhen 236 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1458 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 68;\t\tend\n\twhen 27 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 229 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1502 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 74;\t\tend\n\twhen 256 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1534 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 80;\t\tend\n\twhen 45 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1547 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 81;\t\tend\n\twhen 277 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1599 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 88;\t\tend\n\twhen 266 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1603 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 89;\t\tend\n\twhen 280 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1797 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 111;\t\tend\n\twhen 348 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1803 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 112;\t\tend\n\twhen 347 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1809 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 113;\t\tend\n\twhen 69 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 115;\t\tend\n\twhen 278 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 116;\t\tend\n\twhen 281 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1876 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 119;\t\tend\n\twhen 451 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1940 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 132;\t\tend\n\twhen 446 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1965 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 133;\t\tend\n\twhen 454 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1975 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 135;\t\tend\n\twhen 447 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1980 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 136;\t\tend\n\twhen 448 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1984 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 137;\t\tend\n\twhen 453 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1988 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 138;\t\tend\n\twhen 445 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1999 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 139;\t\tend\n\twhen 440 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2013 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 140;\t\tend\n\twhen 376 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 141;\t\tend\n\twhen 406 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 144;\t\tend\n\twhen 76 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2086 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 145;\t\tend\n\twhen 379 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2114 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 147;\t\tend\n\twhen 371 then\n# line 1 \"NONE\"\n\t\tbegin\n @te 
= p+1\n\t\tend\n# line 1216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 151;\t\tend\n\twhen 381 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2136 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 152;\t\tend\n\twhen 160 then\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 883 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n current_literal = literal\n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(current_literal.str_s, current_literal.str_s + 1)\n end\n\n if current_literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, ''.freeze)\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, ''.freeze)\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if current_literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = current_literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Calculate indentation level for <<~HEREDOCs.\n current_literal.infer_indent_level(line)\n\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if current_literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n1\\n2\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if current_literal.words? && !eof_codepoint?(@source_pts[p])\n current_literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n current_literal.extend_string tok, @ts, @te\n current_literal.flush_string\n end\n end\n\t\tend\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 110 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 136 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 332 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 115 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 141 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 337 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 114 then\n# line 693 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 140 then\n# line 693 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 336 then\n# line 693 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 112 then\n# line 698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 138 then\n# line 698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 334 then\n# line 698 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 108 then\n# line 699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 134 then\n# line 699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
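# ---- Illustrative aside (hypothetical sketch) ----
# The paired actions above implement \C-x / \cx and \M-x escapes by bit
# twiddling: the control action masks with 0x9f (so an already-set meta bit
# survives), the meta action sets 0x80. A standalone sketch; encode_escape
# in the real lexer also forces the source encoding, which is omitted here:
ctrl = ->(ch) { (ch.ord & 0x9f).chr } # \C-x
meta = ->(ch) { (ch.ord | 0x80).chr } # \M-x

ctrl.call("a")            # => "\x01"
meta.call("a")            # => "\xE1"
meta.call(ctrl.call("a")) # => "\x81"  (\M-\C-a)
# ---- end aside ----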
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 330 then\n# line 699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 118 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 144 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 340 then\n# line 729 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :invalid_unicode_escape, nil, range(@escape_s - 1, p)\n \t\tend\n# line 740 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :unterminated_unicode, nil, range(p - 1, p)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 102 then\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 766 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? 
&& REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 129 then\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 766 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 325 then\n# line 772 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 766 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 58 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
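# ---- Illustrative aside (hypothetical sketch) ----
# The `?x` character-literal action above emits tINTEGER on Ruby 1.8 (where
# ?a == 97) and tCHARACTER on 1.9+ (where ?a == "a"). A standalone sketch of
# that version split; emit and version? belong to the real lexer:
def char_literal_token(value, ruby18:)
  if ruby18
    [:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord]
  else
    [:tCHARACTER, value]
  end
end

char_literal_token("a", ruby18: true)  # => [:tINTEGER, 97]
char_literal_token("a", ruby18: false) # => [:tCHARACTER, "a"]
# ---- end aside ----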
p - 2 : p) \t\tend\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 196 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1316 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 211 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1334 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 223 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1366 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 245 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1505 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 773\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 260 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1584 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 272 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1605 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 296 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 358 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1884 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 368 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1915 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 388 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2193 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 89 then\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
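# ---- Illustrative aside (hypothetical sketch) ----
# The repeated `emit_comment(@sharp_s, p == pe ? p - 2 : p)` above records a
# `#`-comment's source range: @sharp_s marks the `#`, and at end of input the
# end offset is pulled back so the machine's synthetic end-of-input padding
# is not included (an assumption based on pe = @source.length + 1 below):
def comment_range(sharp_s, p, pe)
  [sharp_s, p == pe ? p - 2 : p] # half-open [begin, end) offsets
end

comment_range(10, 25, 40) # => [10, 25]  (comment ends at a newline)
comment_range(10, 40, 40) # => [10, 38]  (comment runs to end of input)
# ---- end aside ----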
p - 2 : p) \t\tend\n# line 2248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 431 then\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 401 then\n# line 2025 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n\twhen 413 then\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 408 then\n# line 2083 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 631 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2086 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 405 then\n# line 2084 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 631 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tFLOAT, Float(chars)) } \t\tend\n# line 2086 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 251 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 492 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1502 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 74;\t\tend\n\twhen 35 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n 
\t\tend\n# line 1497 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 46 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1547 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 81;\t\tend\n\twhen 62 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 94;\t\tend\n\twhen 77 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1969 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 134;\t\tend\n\twhen 37 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1458 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 68;\t\tend\n\twhen 351 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1802 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1848 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 115;\t\tend\n\twhen 350 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1802 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1216 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 116;\t\tend\n\twhen 432 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 141;\t\tend\n\twhen 113 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 139 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 335 then\n# line 672 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n codepoint = @source_pts[p - 1]\n if (@escape = ESCAPES[codepoint]).nil?\n @escape = encode_escape(@source_buffer.slice(p - 1))\n end\n \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 111 then\n# line 699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 137 then\n# line 699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n current_literal = literal\n # Get the first character after the backslash.\n escaped_char = @source_buffer.slice(@escape_s).chr\n\n if current_literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if current_literal.regexp? && REGEXP_META_CHARACTERS.match(escaped_char)\n # Regular expressions should include escaped delimiters in their\n # escaped form, except when the escaped character is\n # a closing delimiter but not a regexp metacharacter.\n #\n # The backslash itself cannot be used as a closing delimiter\n # at the same time as an escape symbol, but it is always munged,\n # so this branch also executes for the non-closing-delimiter case\n # for the backslash.\n current_literal.extend_string(tok, @ts, @te)\n else\n current_literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n if current_literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n current_literal.extend_string(tok.gsub(\"\\\\\\n\".freeze, ''.freeze), @ts, @te)\n else\n current_literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n\t\tend\n\twhen 333 then\n# line 699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source_buffer.slice(p - 1).chr \t\tend\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 687 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1731 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n value = @escape || tok(@ts + 1)\n\n if version?(18)\n if defined?(Encoding)\n emit(:tINTEGER, value.dup.force_encoding(Encoding::BINARY)[0].ord)\n else\n emit(:tINTEGER, value[0].ord)\n end\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 773; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 427 then\n# line 2020 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 421 then\n# line 2021 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 424 then\n# line 2022 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 418 then\n# line 2023 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 430 then\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? 
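# ---- Illustrative aside (hypothetical sketch) ----
# The integer action above validates the collected digits before emitting
# tINTEGER: a trailing underscore, an empty digit run (except `0o` on 1.8),
# and the digits 8/9 in octal are all diagnosed. A simplified standalone
# sketch, with plain exceptions standing in for the diagnostic machinery:
def check_int_digits(digits, base)
  raise ArgumentError, "trailing `_' in number"        if digits.end_with?("_")
  raise ArgumentError, "numeric literal without digits" if digits.empty?
  raise ArgumentError, "invalid octal digit"            if base == 8 && digits =~ /[89]/
  digits.to_i(base)
end

check_int_digits("1_000", 10) # => 1000 (String#to_i skips inner underscores)
check_int_digits("777", 8)    # => 511
# ---- end aside ----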
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 400 then\n# line 2025 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2027 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'.freeze\n diagnostic :error, :trailing_in_number, { :character => '_'.freeze },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = '0'.freeze\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base), @ts, @num_suffix_s)\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 31 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 812 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1457 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1458 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 68;\t\tend\n\twhen 59 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1128 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1131 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 1626 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 94;\t\tend\n\twhen 437 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2059 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 143;\t\tend\n\twhen 433 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2024 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2059 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 143;\t\tend\n\twhen 403 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2025 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 2026 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 625 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |chars| emit(:tINTEGER, chars) } \t\tend\n# line 2059 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 143;\t\tend\n# line 21651 \"lib/parser/lexer.rb\"\n\tend\n\tend\n\tend\n\tif _goto_level <= _again\n\tcase _lex_to_state_actions[ @cs] \n\twhen 83 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = nil;\t\tend\n# line 21661 \"lib/parser/lexer.rb\"\n\tend\n\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tp += 1\n\tif p != pe\n\t\t_goto_level = _resume\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _test_eof\n\tif p == eof\n\tif _lex_eof_trans[ @cs] > 0\n\t\t_trans = _lex_eof_trans[ @cs] - 1;\n\t\t_goto_level = _eof_trans\n\t\tnext;\n\tend\n\tend\n\n\tend\n\tif _goto_level <= _out\n\t\tbreak\n\tend\nend\n\tend\n\n# line 282 \"lib/parser/lexer.rl\"\n # %\n\n @p = p\n\n if @token_queue.any?\n @token_queue.shift\n elsif @cs == klass.lex_error\n [ false, [ '$error'.freeze, range(p - 1, p) ] ]\n else\n eof = @source_pts.size\n [ false, [ '$eof'.freeze, range(eof, eof) ] ]\n end\n end", "def pos() @current end", "def consume\n @current = @tokens[@pos]\n @pos += 1 if @current\n @current\n end", "def advance; end", "def advance; end", "def advance; end", "def advance\n @to = @to.next\n end", "def advance\n if @token_queue.any?\n return @token_queue.shift\n end\n\n # Ugly, but dependent on Ragel output. 
Consider refactoring it somehow.\n _lex_trans_keys = self.class.send :_lex_trans_keys\n _lex_key_spans = self.class.send :_lex_key_spans\n _lex_index_offsets = self.class.send :_lex_index_offsets\n _lex_indicies = self.class.send :_lex_indicies\n _lex_trans_targs = self.class.send :_lex_trans_targs\n _lex_trans_actions = self.class.send :_lex_trans_actions\n _lex_to_state_actions = self.class.send :_lex_to_state_actions\n _lex_from_state_actions = self.class.send :_lex_from_state_actions\n _lex_eof_trans = self.class.send :_lex_eof_trans\n\n p, pe, eof = @p, @source.length + 1, @source.length + 1\n\n @command_state = (@cs == self.class.lex_en_expr_value ||\n @cs == self.class.lex_en_line_begin)\n\n \n# line 10604 \"lib/parser/lexer.rb\"\nbegin\n\ttestEof = false\n\t_slen, _trans, _keys, _inds, _acts, _nacts = nil\n\t_goto_level = 0\n\t_resume = 10\n\t_eof_trans = 15\n\t_again = 20\n\t_test_eof = 30\n\t_out = 40\n\twhile true\n\tif _goto_level <= 0\n\tif p == pe\n\t\t_goto_level = _test_eof\n\t\tnext\n\tend\n\tif @cs == 0\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _resume\n\tcase _lex_from_state_actions[ @cs] \n\twhen 74 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = p\n\t\tend\n# line 10632 \"lib/parser/lexer.rb\"\n\tend\n\t_keys = @cs << 1\n\t_inds = _lex_index_offsets[ @cs]\n\t_slen = _lex_key_spans[ @cs]\n\t_trans = if ( _slen > 0 && \n\t\t\t_lex_trans_keys[_keys] <= ( (@source_pts[p] || 0)) && \n\t\t\t( (@source_pts[p] || 0)) <= _lex_trans_keys[_keys + 1] \n\t\t ) then\n\t\t\t_lex_indicies[ _inds + ( (@source_pts[p] || 0)) - _lex_trans_keys[_keys] ] \n\t\t else \n\t\t\t_lex_indicies[ _inds + _slen ]\n\t\t end\n\tend\n\tif _goto_level <= _eof_trans\n\t @cs = _lex_trans_targs[_trans]\n\tif _lex_trans_actions[_trans] != 0\n\tcase _lex_trans_actions[_trans]\n\twhen 22 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 52 then\n# line 752 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 23 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n\twhen 63 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n\twhen 66 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
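# ---- Illustrative aside (hypothetical sketch) ----
# `advance` above drains @token_queue first and only then steps the Ragel
# machine, so callers see one (type, (value, range)) pair per call, with
# [false, ['$eof', ...]] or [false, ['$error', ...]] as the end sentinel.
# A hedged sketch of how a driver loop typically consumes this interface:
def each_token(lexer)
  loop do
    type, (value, range) = lexer.advance
    break unless type # false => '$eof' / '$error' sentinel
    yield type, value, range
  end
end
# ---- end aside ----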
p - 2 : p) \t\tend\n\twhen 253 then\n# line 1115 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 30 then\n# line 1364 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 32 then\n# line 1380 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 34 then\n# line 1408 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 49 then\n# line 1592 \"lib/parser/lexer.rl\"\n\t\tbegin\n @heredoc_e = p \t\tend\n\twhen 322 then\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 389 then\n# line 1902 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n\twhen 383 then\n# line 1903 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n\twhen 386 then\n# line 1904 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n\twhen 380 then\n# line 1905 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n\twhen 395 then\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n\twhen 363 then\n# line 1907 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n\twhen 375 then\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 370 then\n# line 1965 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 368 then\n# line 1966 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 7 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 90 then\n# line 983 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n literal.flush_string\n literal.extend_content\n\n emit(:tSTRING_DBEG, '#{')\n\n if literal.heredoc?\n literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n literal.start_interp_brace\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 731\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 5 then\n# line 931 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n literal.flush_string\n literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 86 then\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? 
&& !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 85 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 91 then\n# line 931 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.flush_string\n literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 88 then\n# line 918 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 89 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 6 then\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 4 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? 
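# ---- Illustrative aside ----
# @herebody_s above is what lets a quoted literal interleave with a heredoc
# body: scanning jumps into the body after the reference line, then rewinds.
# The commented example in the action really does evaluate as claimed:
value = <<-FOO + "1
bar
FOO
2"
value # => "bar\n12"
# ---- end aside ----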
&& literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 117 then\n# line 983 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n literal.flush_string\n literal.extend_content\n\n emit(:tSTRING_DBEG, '#{')\n\n if literal.heredoc?\n literal.saved_herebody_s = @herebody_s\n @herebody_s = nil\n end\n\n literal.start_interp_brace\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 731\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 10 then\n# line 931 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n literal.flush_string\n literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 114 then\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 113 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 118 then\n# line 931 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.flush_string\n literal.extend_content\n\n emit(:tSTRING_DVAR, nil, @ts, @ts + 1)\n\n p = @ts\n \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 116 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? 
&& literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 11 then\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 9 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 141 then\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? 
&& !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 140 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 143 then\n# line 918 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n literal.extend_space @ts, @te\n end\n\t\tend\n\twhen 144 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 147 then\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 146 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? && literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 149 then\n# line 801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n string = @source[@ts...@te]\n\n if !literal.heredoc? 
&& literal.nest_and_try_closing(string, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n literal.extend_string(string, @ts, @te)\n end\n end\n\t\tend\n\twhen 152 then\n# line 1046 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tREGEXP_OPT, tok(@ts, @te - 1), @ts, @te - 1)\n p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 153 then\n# line 1034 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n unknown_options = tok.scan(/[^imxouesn]/)\n if unknown_options.any?\n diagnostic :error, :regexp_options,\n { :options => unknown_options.join }\n end\n\n emit(:tREGEXP_OPT)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 12 then\n# line 1174 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 154 then\n# line 1174 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^\\$([1-9][0-9]*)$/\n emit(:tNTH_REF, tok(@ts + 1).to_i)\n elsif tok =~ /^\\$([&`'+])$/\n emit(:tBACK_REF)\n else\n emit(:tGVAR)\n end\n\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 156 then\n# line 1187 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@@[0-9]/\n diagnostic :error, :cvar_name, { :name => tok }\n end\n\n emit(:tCVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 155 then\n# line 1197 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tok =~ /^@[0-9]/\n diagnostic :error, :ivar_name, { :name => tok }\n end\n\n emit(:tIVAR)\n @cs = (stack_pop); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 177 then\n# line 1218 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(KEYWORDS_BEGIN[tok]);\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 163 then\n# line 1226 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tIDENTIFIER)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 14 then\n# line 1230 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n @cs = 739; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 160 then\n# line 1239 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 172 then\n# line 1243 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 159 then\n# line 1251 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 158 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. 
#advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 176 then\n# line 1218 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(KEYWORDS_BEGIN[tok]);\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 173 then\n# line 1222 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 175 then\n# line 1226 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 170 then\n# line 1230 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n @cs = 739; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 166 then\n# line 1239 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 171 then\n# line 1246 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 164 then\n# line 1248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 169 then\n# line 1251 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 13 then\n# line 1251 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 162 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 25 then\n\tbegin begin p = (( @te))-1; end\n emit(KEYWORDS_BEGIN[tok]);\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 26 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 27 then\n\tbegin begin p = (( @te))-1; end\n emit(:tIDENTIFIER)\n @cs = 422; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 16 then\n# line 1263 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tLABEL, tok(@ts, @te - 1))\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 179 then\n# line 1269 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 178 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 181 then\n# line 1266 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 180 then\n# line 1269 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 15 then\n# line 1269 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 187 then\n# line 1295 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 186 then\n# line 1301 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 185 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 197 then\n# line 1280 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 188 then\n# line 1284 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tIDENTIFIER)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 193 then\n# line 1295 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 191 then\n# line 1298 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 196 then\n# line 1301 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 201 then\n# line 1359 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 204 then\n# line 1368 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if tok(tm, tm + 1) == '/'\n # Ambiguous regexp literal.\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 210 then\n# line 1392 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 18 then\n# line 1400 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 212 then\n# line 1409 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = 
_again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 33 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 198 then\n# line 1432 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 199 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 211 then\n# line 1359 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 207 then\n# line 1381 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 209 then\n# line 1397 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 203 then\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 202 then\n# line 1423 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 220 then\n# line 1432 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 19 then\n# line 1423 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 35 then\n# line 1432 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 17 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 51 then\n\tbegin begin p = (( @te))-1; end\n\n if tok(tm, tm + 1) == '/'\n # Ambiguous regexp literal.\n diagnostic :warning, :ambiguous_literal, nil, range(tm, tm + 1)\n end\n\n p = tm - 1\n \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 52 then\n\tbegin begin p = (( @te))-1; end\n\n diagnostic :warning, :ambiguous_prefix, { :prefix => tok(tm, @te) },\n range(tm, @te)\n\n p = tm - 1\n \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\twhen 57 then\n\tbegin begin p = (( @te))-1; end\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\telse\n\tbegin begin p = (( @te))-1; end\nend\nend \n\t\t\tend\n\twhen 37 then\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 451\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 223 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. 
#advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 224 then\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 451\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 38 then\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 451\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 36 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 64 then\n\tbegin begin p = (( @te))-1; end\n\n if @cond.active?\n emit(:kDO_COND, 'do', @te - 2, @te)\n else\n emit(:kDO, 'do', @te - 2, @te)\n end\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 65 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 451\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 234 then\n# line 1495 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_do(true)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 227 then\n# line 1501 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 228 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 229 then\n# line 1498 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 232 then\n# line 1501 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 238 then\n# line 1525 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 237 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 246 then\n# line 1517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 240 then\n# line 1519 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 244 then\n# line 1525 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 239 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 72 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 73 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 274 then\n# line 1542 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1;\n if tok.start_with? '-'\n emit(:tUMINUS_NUM, '-', @ts, @ts + 1)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 275 then\n# line 1562 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type = delimiter = tok[0].chr\n p = p - 1; \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 269 then\n# line 1569 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok[0].chr, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 45 then\n# line 1576 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 276 then\n# line 1616 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 48 then\n# line 1630 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 289 then\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 291 then\n# line 1658 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[tok[1]]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 290 then\n# line 1668 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 277 then\n# line 1704 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 42 then\n# line 1724 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = p - 1;\n\n if version?(18)\n ident = tok(@ts, @te - 2)\n\n emit((tok[0] =~ /[A-Z]/) ? :tCONSTANT : :tIDENTIFIER,\n ident, @ts, @te - 2)\n p = p - 1; # continue as a symbol\n\n if !@static_env.nil? && @static_env.declared?(ident)\n @cs = 739;\n else\n @cs = (arg_or_cmdarg);\n end\n else\n emit(:tLABEL, tok(@ts, @te - 2), @ts, @te - 1)\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 263 then\n# line 1769 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 156\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 46 then\n# line 1782 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 249 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 273 then\n# line 1552 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tSTAR)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 270 then\n# line 1576 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n type, delimiter = tok[0..-2], tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 268 then\n# line 1582 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 278 then\n# line 1630 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1), @ts)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 293 then\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 288 then\n# line 1668 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 294 then\n# line 1674 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 271 then\n# line 1704 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 316 then\n# line 1159 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 260 then\n# line 1766 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 262 then\n# line 1769 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 156\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 265 then\n# line 1782 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 44 then\n# line 1582 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :string_eof, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 53 then\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 51 then\n# line 1668 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :incomplete_escape, nil, range(@ts, @ts + 1)\n end\n\t\tend\n\twhen 43 then\n# line 1766 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n\t\tend\n\twhen 47 then\n# line 1782 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = @ts - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 41 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 95 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN, @ts, tm)\n p = tm - 1\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 96 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 98 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 99 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 102 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 56 then\n# line 1792 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 325 then\n# line 1801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 324 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 327 then\n# line 1795 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 326 then\n# line 1801 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 55 then\n# line 1801 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 519\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 357 then\n# line 1812 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION, @ts, @ts + 2)\n\n @lambda_stack.push @paren_nest\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 70 then\n# line 1849 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:kCLASS, 'class', @ts, @ts + 5)\n emit(:tLSHFT, '<<', @te - 2, @te)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 335 then\n# line 1986 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n type, delimiter = tok, tok[-1].chr\n \tbegin\n\t\t @cs = (push_literal(type, delimiter, @ts))\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 58 then\n# line 2004 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 399 then\n# line 2011 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 429; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 349 then\n# line 2038 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 341 then\n# line 2042 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_table(PUNCTUATION)\n @cond.lexpop; @cmdarg.lexpop\n\n if %w\"} ]\".include?(tok)\n @cs = 487;\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 354 then\n# line 2056 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tOP_ASGN, tok(@ts, @te - 1))\n @cs = 519; 
\tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 339 then\n# line 2060 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 336 then\n# line 2068 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 338 then\n# line 2081 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit_table(PUNCTUATION)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 404 then\n# line 2084 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :error, :bare_backslash, nil, range(@ts, @ts + 1)\n p = p - 1;\n end\n\t\tend\n\twhen 334 then\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 333 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 414 then\n# line 1845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 310; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 412 then\n# line 1849 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:kCLASS, 'class', @ts, @ts + 5)\n emit(:tLSHFT, '<<', @te - 2, @te)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 411 then\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 360 then\n# line 1936 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 401 then\n# line 1996 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 353 then\n# line 2004 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1; \tbegin\n\t\t @stack[ @top] = @cs\n\t\t @top+= 1\n\t\t @cs = 305\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 358 then\n# line 2011 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 429; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 406 then\n# line 1159 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 348 then\n# line 2038 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 359 then\n# line 2068 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 346 then\n# line 2075 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 352 then\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 59 then\n# line 1936 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :error, :no_dot_digit_literal\n end\n\t\tend\n\twhen 57 then\n# line 2090 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin \n diagnostic :fatal, :unexpected, { :character => tok.inspect[1..-2] }\n end\n\t\tend\n\twhen 60 then\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 110 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'\n emit(:tLAMBEG)\n else # 'do'\n emit(:kDO_LAMBDA)\n end\n else\n if tok == '{'\n emit_table(PUNCTUATION)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 111 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 310; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 112 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class', @ts, @ts + 5)\n emit(:tLSHFT, '<<', @te - 2, @te)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 113 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 114 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 115 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 116 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 117 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 739;\n else\n @cs = (arg_or_cmdarg);\n end\n else\n emit_table(KEYWORDS)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 118 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 119 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 121 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 123 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 125 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 129 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 130 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\nend \n\t\t\tend\n\twhen 72 then\n# line 2102 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; p = p - 1;\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 419 then\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 156; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 420 then\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 156; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 71 then\n# line 2106 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin emit(:tNL, nil, @newline_s, @newline_s + 1)\n p = p - 1; @cs = 156; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 423 then\n# line 2116 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = 156\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 422 then\n# line 2124 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n diagnostic :fatal, :embedded_document, nil,\n range(@eq_begin_s, @eq_begin_s + '=begin'.length)\n end\n\t\tend\n\twhen 83 then\n# line 2134 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 916\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 2 then\n# line 2138 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 75 then\n# line 2141 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = p - 1; \tbegin\n\t\t @cs = 731\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 76 then\n# line 486 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n # Sit at EOF indefinitely. #advance would return $eof each time.\n # This allows to feed the lexer more data if needed; this is only used\n # in tests.\n #\n # Note that this action is not embedded into e_eof like e_heredoc_nl and e_bs\n # below. 
This is due to the fact that scanner state at EOF is observed\n # by tests, and encapsulating it in a rule would break the introspection.\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 77 then\n# line 2131 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 82 then\n# line 2134 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 916\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 81 then\n# line 2141 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 731\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 1 then\n# line 2141 \"lib/parser/lexer.rl\"\n\t\tbegin\n begin p = (( @te))-1; end\n begin p = p - 1; \tbegin\n\t\t @cs = 731\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 69 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 87 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 115 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 142 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. 
Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 148 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. @herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). 
See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n\twhen 213 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1409 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = tm - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 205 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 292 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1658 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n escape = { \" \" => '\\s', \"\\r\" => '\\r', \"\\n\" => '\\n', \"\\t\" => '\\t',\n \"\\v\" => '\\v', \"\\f\" => '\\f' }[tok[1]]\n diagnostic :warning, :invalid_escape_use, { :escape => escape }, range\n\n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 264 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1769 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = @ts - 1\n \tbegin\n\t\t @cs = 156\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 424 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2116 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n emit_comment(@eq_begin_s, @te)\n \tbegin\n\t\t @cs = 156\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 421 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2121 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n\t\tend\n\twhen 84 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2134 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin @eq_begin_s = @ts\n \tbegin\n\t\t @cs = 916\n\t\t_goto_level = _again\n\t\tnext\n\tend\n 
end\n\t\tend\n\twhen 3 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 2138 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin p = pe - 3 end\n\t\tend\n\twhen 377 then\n# line 594 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tRATIONAL, Rational(value)) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 376 then\n# line 595 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tIMAGINARY, Complex(0, value)) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 378 then\n# line 596 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tIMAGINARY, Complex(0, Rational(value))) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 371 then\n# line 600 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |digits| emit(:tIMAGINARY, Complex(0, Float(digits))) } \t\tend\n# line 1968 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 372 then\n# line 604 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |digits| emit(:tRATIONAL, Rational(digits)) } \t\tend\n# line 1968 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 373 then\n# line 605 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |digits| emit(:tIMAGINARY, Complex(0, Rational(digits))) } \t\tend\n# line 1968 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 109 then\n# line 620 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n @escape = lambda do\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n end\n\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. 
The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 136 then\n# line 620 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n @escape = lambda do\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n end\n\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 311 then\n# line 620 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n @escape = lambda do\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n end\n\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 93 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 120 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? 
:call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 295 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 96 then\n# line 648 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 123 then\n# line 648 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? 
&& escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 298 then\n# line 648 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_escape\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 98 then\n# line 669 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 125 then\n# line 669 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 300 then\n# line 669 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 95 then\n# line 676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. 
So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 122 then\n# line 676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 297 then\n# line 676 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s, p).to_i(8) % 0x100) \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 112 then\n# line 680 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 139 then\n# line 680 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? 
:call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 314 then\n# line 680 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = encode_escape(tok(@escape_s + 1, p).to_i(16)) \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 106 then\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 133 then\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? 
&& escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 308 then\n# line 683 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = tok(@escape_s + 1, p).to_i(16).chr(Encoding::UTF_8) \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 111 then\n# line 688 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_hex_escape, nil,\n range(@escape_s - 1, p + 2)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 138 then\n# line 688 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_hex_escape, nil,\n range(@escape_s - 1, p + 2)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 313 then\n# line 688 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_hex_escape, nil,\n range(@escape_s - 1, p + 2)\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 105 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? 
&& escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 132 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 307 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 108 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :unterminated_unicode, nil,\n range(p - 1, p)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 135 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :unterminated_unicode, nil,\n range(p - 1, p)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. 
As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 310 then\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :unterminated_unicode, nil,\n range(p - 1, p)\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 94 then\n# line 746 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 121 then\n# line 746 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 296 then\n# line 746 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 145 then\n# line 752 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 150 then\n# line 752 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 62 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 24 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1364 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 26 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1380 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 28 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1408 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n\twhen 165 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # 
position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 184 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1266 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 192 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1298 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 27 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 222 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1423 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 216 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1429 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 233 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1498 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 245 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1519 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 241 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1522 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = p - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 261 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1766 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 331 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if 
@herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1795 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 328 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1798 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 156\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 405 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2075 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 347 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2078 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 913\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 78 then\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 2131 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 215 then\n# line 955 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n if literal\n literal.start_interp_brace\n end\n \t\tend\n# line 1342 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n\n else\n emit(:tLCURLY, '{', @te - 1, @te)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 235 then\n# line 955 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n if literal\n literal.start_interp_brace\n end\n \t\tend\n# line 1491 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACE_ARG)\n @cs = 731; end\n\t\tend\n\twhen 323 then\n# line 955 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n if literal\n literal.start_interp_brace\n end\n \t\tend\n# line 1685 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n emit(:tLAMBEG)\n else\n emit_table(PUNCTUATION_BEGIN)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 417 then\n# line 955 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n if literal\n literal.start_interp_brace\n end\n \t\tend\n# line 1820 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'\n emit(:tLAMBEG)\n else # 'do'\n emit(:kDO_LAMBDA)\n end\n else\n if tok == '{'\n emit_table(PUNCTUATION)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 418 then\n# line 963 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n if literal\n if literal.end_interp_brace_and_try_closing\n if version?(18, 19)\n emit(:tRCURLY, 
'}', p - 1, p)\n else\n emit(:tSTRING_DEND, '}', p - 1, p)\n end\n\n if literal.saved_herebody_s\n @herebody_s = literal.saved_herebody_s\n end\n\n p = p - 1;\n @cs = (stack_pop);\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n \t\tend\n# line 2042 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n @cond.lexpop; @cmdarg.lexpop\n\n if %w\"} ]\".include?(tok)\n @cs = 487;\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 64 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n\twhen 67 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 168 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 183 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1266 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 195 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1298 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 218 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1426 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 231 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1498 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 243 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1519 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 267 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1766 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 330 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1795 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 351 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2075 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 80 then\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 2131 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 189 then\n# line 1115 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1288 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 279 then\n# line 1115 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 252 then\n# line 1115 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 343 then\n# line 1115 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 2018 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 345 then\n# line 1115 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 110 then\n\tbegin begin p = (( @te))-1; end\n\n if @lambda_stack.last == @paren_nest\n @lambda_stack.pop\n\n if tok == '{'\n emit(:tLAMBEG)\n else # 'do'\n emit(:kDO_LAMBDA)\n end\n else\n if tok == '{'\n emit_table(PUNCTUATION)\n else # 'do'\n emit_do\n end\n end\n\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 111 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 310; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 112 then\n\tbegin begin p = (( @te))-1; end\n emit(:kCLASS, 'class', @ts, @ts + 5)\n emit(:tLSHFT, '<<', @te - 2, @te)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 113 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 114 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 115 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 116 then\n\tbegin begin p = (( @te))-1; end\n\n emit_table(KEYWORDS)\n\n if version?(18) && tok == 'not'\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 117 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18)\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 739;\n else\n @cs = (arg_or_cmdarg);\n end\n else\n emit_table(KEYWORDS)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 118 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 119 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? 
'_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 121 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tINTEGER, tok(@ts, @te - 1).to_i)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 122 then\n\tbegin begin p = (( @te))-1; end\n\n if version?(18, 19, 20)\n diagnostic :error,\n :trailing_in_number, { :character => tok(@te - 1, @te) },\n range(@te - 1, @te)\n else\n emit(:tFLOAT, tok(@ts, @te - 1).to_f)\n p = p - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 123 then\n\tbegin begin p = (( @te))-1; end\n\n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\twhen 125 then\n\tbegin begin p = (( @te))-1; end\n emit(:tCONSTANT)\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 129 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? && @static_env.declared?(tok)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 130 then\n\tbegin begin p = (( @te))-1; end\n\n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\nend \n\t\t\tend\n\twhen 190 then\n# line 1116 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1288 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tFID, tok(@ts, tm), @ts, tm)\n @cs = (arg_or_cmdarg); p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 280 then\n# line 1116 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 254 then\n# line 1116 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 344 then\n# line 1116 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2018 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if tm == @te\n # Suffix was consumed, e.g. foo!\n emit(:tFID)\n else\n # Suffix was not consumed, e.g. 
foo!=\n emit(:tIDENTIFIER, tok(@ts, tm), @ts, tm)\n p = tm - 1\n end\n @cs = 451; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 281 then\n# line 1121 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 255 then\n# line 1121 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 282 then\n# line 1122 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 256 then\n# line 1122 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 286 then\n# line 1123 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 259 then\n# line 1123 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 285 then\n# line 1124 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 258 then\n# line 1124 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1 \"NONE\"\n\t\tbegin\n\tcase @act\n\twhen 94 then\n\tbegin begin p = (( @te))-1; end\n emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 95 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN, @ts, tm)\n p = tm - 1\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 96 then\n\tbegin begin p = (( @te))-1; end\n emit_table(KEYWORDS_BEGIN)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\twhen 98 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\twhen 99 then\n\tbegin begin p = (( @te))-1; end\n\n emit(:tIDENTIFIER)\n\n if !@static_env.nil? 
&& @static_env.declared?(tok)\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = (arg_or_cmdarg); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\twhen 102 then\n\tbegin begin p = (( @te))-1; end\n p = @ts - 1; \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\nend \n\t\t\tend\n\twhen 283 then\n# line 1125 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 257 then\n# line 1125 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 3 \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin p = @ts - 1\n \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 284 then\n# line 1130 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 1622 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tSYMBOL, tok(@ts + 1, tm), @ts, tm)\n p = tm - 1\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 400 then\n# line 1135 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p - 2 \t\tend\n# line 2000 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tCONSTANT, tok(@ts, tm), @ts, tm)\n p = tm - 1; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 214 then\n# line 1141 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 1336 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLBRACK, '[', @te - 1, @te)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 315 then\n# line 1141 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 1699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 403 then\n# line 1141 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n \t\tend\n# line 2064 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 206 then\n# line 1148 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1317 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if version?(18)\n emit(:tLPAREN2, '(', @te - 1, @te)\n @cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n emit(:tLPAREN_ARG, '(', @te - 1, @te)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 219 then\n# line 1148 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1330 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit(:tLPAREN2)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 225 then\n# line 1148 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1445 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit(:tLPAREN_ARG, '(', @te - 1, @te)\n if version?(18)\n 
@cs = 731; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n end\n\t\tend\n\twhen 272 then\n# line 1148 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 1699 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION_BEGIN)\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 355 then\n# line 1148 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @cond.push(false); @cmdarg.push(false)\n\n @paren_nest += 1\n \t\tend\n# line 2038 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(PUNCTUATION)\n @cs = 519; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 356 then\n# line 1154 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @paren_nest -= 1\n \t\tend\n# line 2042 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n emit_table(PUNCTUATION)\n @cond.lexpop; @cmdarg.lexpop\n\n if %w\"} ]\".include?(tok)\n @cs = 487;\n else # )\n # fnext expr_endfn; ?\n end\n\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 50 then\n# line 1592 \"lib/parser/lexer.rl\"\n\t\tbegin\n @heredoc_e = p \t\tend\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 287 then\n# line 1593 \"lib/parser/lexer.rl\"\n\t\tbegin\n new_herebody_s = p \t\tend\n# line 1594 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n tok(@ts, @heredoc_e) =~ /^<<(-?)([\"'`]?)(.*)\\2$/\n\n indent = !$1.empty?\n type = '<<' + ($2.empty? ? 
'\"' : $2)\n delimiter = $3\n\n @cs = (push_literal(type, delimiter, @ts, @heredoc_e, indent));\n\n if @herebody_s.nil?\n @herebody_s = new_herebody_s\n end\n\n p = @herebody_s - 1\n end\n\t\tend\n\twhen 319 then\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1710 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin emit_table(KEYWORDS_BEGIN, @ts, tm)\n p = tm - 1\n @cs = 495; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 390 then\n# line 1902 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 384 then\n# line 1903 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 387 then\n# line 1904 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 381 then\n# line 1905 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 396 then\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 365 then\n# line 1907 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n\twhen 397 then\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n\twhen 8 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 174 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1218 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 25;\t\tend\n\twhen 161 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1222 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 26;\t\tend\n\twhen 157 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1226 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 27;\t\tend\n\twhen 20 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1368 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 51;\t\tend\n\twhen 208 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1381 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 52;\t\tend\n\twhen 21 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 57;\t\tend\n\twhen 200 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1423 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 58;\t\tend\n\twhen 226 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1455 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 64;\t\tend\n\twhen 39 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 65;\t\tend\n\twhen 247 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1513 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 72;\t\tend\n\twhen 236 then\n# line 1 
\"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1517 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 73;\t\tend\n\twhen 250 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1704 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 94;\t\tend\n\twhen 318 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1710 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 95;\t\tend\n\twhen 317 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1716 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 96;\t\tend\n\twhen 54 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 98;\t\tend\n\twhen 248 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1159 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 99;\t\tend\n\twhen 251 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1782 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 102;\t\tend\n\twhen 413 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1820 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 110;\t\tend\n\twhen 408 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1845 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 111;\t\tend\n\twhen 416 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1855 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 113;\t\tend\n\twhen 409 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 114;\t\tend\n\twhen 410 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1864 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 115;\t\tend\n\twhen 415 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1868 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 116;\t\tend\n\twhen 407 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1879 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 117;\t\tend\n\twhen 402 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1895 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 118;\t\tend\n\twhen 337 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 119;\t\tend\n\twhen 367 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1953 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 122;\t\tend\n\twhen 61 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1968 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 123;\t\tend\n\twhen 340 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1996 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 125;\t\tend\n\twhen 332 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1159 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 129;\t\tend\n\twhen 342 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 2018 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 130;\t\tend\n\twhen 151 then\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 860 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p+1\n begin \n if @te == pe\n diagnostic :fatal, :string_eof, nil,\n range(literal.str_s, literal.str_s + 1)\n end\n\n if literal.heredoc?\n line = tok(@herebody_s, @ts).gsub(/\\r+$/, '')\n\n if version?(18, 19, 20)\n # See ruby:c48b4209c\n line = line.gsub(/\\r.*$/, '')\n end\n\n # Try ending the heredoc with the complete most recently\n # scanned line. 
@herebody_s always refers to the start of such line.\n if literal.nest_and_try_closing(line, @herebody_s, @ts)\n # Adjust @herebody_s to point to the next line.\n @herebody_s = @te\n\n # Continue regular lexing after the heredoc reference (<<END).\n p = literal.heredoc_e - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Ditto.\n @herebody_s = @te\n end\n else\n # Try ending the literal with a newline.\n if literal.nest_and_try_closing(tok, @ts, @te)\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\n if @herebody_s\n # This is a regular literal intertwined with a heredoc. Like:\n #\n # p <<-foo+\"1\n # bar\n # foo\n # 2\"\n #\n # which, incidentally, evaluates to \"bar\\n12\".\n p = @herebody_s - 1\n @herebody_s = nil\n end\n end\n\n if literal.words? && !eof_codepoint?(@source_pts[p])\n literal.extend_space @ts, @te\n else\n # A literal newline is appended if the heredoc was _not_ closed\n # this time (see fbreak above). See also Literal#nest_and_try_closing\n # for rationale of calling #flush_string here.\n literal.extend_string tok, @ts, @te\n literal.flush_string\n end\n end\n\t\tend\n# line 752 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n\twhen 99 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 126 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 301 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 104 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? 
escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 131 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 306 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 103 then\n# line 664 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 130 then\n# line 664 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. 
As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 305 then\n# line 664 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 101 then\n# line 669 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 128 then\n# line 669 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 303 then\n# line 669 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = \"\\x7f\" \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 97 then\n# line 670 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. 
Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 124 then\n# line 670 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 299 then\n# line 670 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 110 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 620 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n @escape = lambda do\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n end\n\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 137 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 620 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n @escape = lambda do\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n end\n\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 312 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 620 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = \"\"\n\n codepoints = tok(@escape_s + 2, p - 1)\n codepoint_s = @escape_s + 2\n\n codepoints.split(/[ \\t]/).each do |codepoint_str|\n codepoint = codepoint_str.to_i(16)\n\n if codepoint >= 0x110000\n @escape = lambda do\n diagnostic :error, :unicode_point_too_large, nil,\n range(codepoint_s, codepoint_s + codepoint_str.length)\n end\n\n break\n end\n\n @escape += codepoint.chr(Encoding::UTF_8)\n codepoint_s += codepoint_str.length + 1\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 107 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :unterminated_unicode, nil,\n range(p - 1, p)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 134 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :unterminated_unicode, nil,\n range(p - 1, p)\n end\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. 
So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 309 then\n# line 703 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :invalid_unicode_escape, nil,\n range(@escape_s - 1, p)\n end\n \t\tend\n# line 717 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = lambda do\n diagnostic :fatal, :unterminated_unicode, nil,\n range(p - 1, p)\n end\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 92 then\n# line 752 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 746 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 119 then\n# line 752 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape_s = p\n @escape = nil\n \t\tend\n# line 746 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n diagnostic :fatal, :escape_eof, nil, range(p - 1, p)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 65 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n\twhen 167 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1248 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 182 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1266 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 194 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 1298 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 217 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1426 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \tbegin\n\t\t @cs = 739\n\t\t_goto_level = _again\n\t\tnext\n\tend\n end\n\t\tend\n\twhen 230 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1498 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 242 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1519 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 266 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1766 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 329 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 1795 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 350 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2075 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 79 then\n# line 1071 \"lib/parser/lexer.rl\"\n\t\tbegin\n @sharp_s = p - 1 \t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? p - 2 : p) \t\tend\n# line 2131 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1;\t\tend\n\twhen 392 then\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n\twhen 362 then\n# line 1907 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n\twhen 374 then\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 369 then\n# line 1965 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 599 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |digits| emit(:tFLOAT, Float(digits)) } \t\tend\n# line 1968 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 366 then\n# line 1966 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 599 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |digits| emit(:tFLOAT, Float(digits)) } \t\tend\n# line 1968 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@ts, @num_suffix_s)\n\n if version?(18, 19, 20)\n emit(:tFLOAT, Float(digits))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits)\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 221 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 460 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # Record position of a newline for precise location reporting on tNL\n # tokens.\n #\n # This action is embedded directly into c_nl, as it is idempotent and\n # there are no cases when we need to skip it.\n @newline_s = p\n \t\tend\n# line 1423 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 58;\t\tend\n\twhen 29 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1418 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 57;\t\tend\n\twhen 40 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1468 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 65;\t\tend\n\twhen 68 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1074 \"lib/parser/lexer.rl\"\n\t\tbegin\n emit_comment(@sharp_s, p == pe ? 
p - 2 : p) \t\tend\n# line 1849 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 112;\t\tend\n\twhen 31 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1380 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1381 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 52;\t\tend\n\twhen 321 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1754 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 98;\t\tend\n\twhen 320 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1709 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1159 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 99;\t\tend\n\twhen 393 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 119;\t\tend\n\twhen 102 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. 
On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 129 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 304 then\n# line 643 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n char = @source[p - 1].chr\n @escape = ESCAPES.fetch(char, char)\n \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? 
:call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 100 then\n# line 670 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 127 then\n# line 670 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 811 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n if literal.nest_and_try_closing('\\\\', @ts, @ts + 1)\n # If the literal is actually closed by the backslash,\n # rewind the input prior to consuming the escape sequence.\n p = @escape_s - 1\n @cs = (pop_literal); \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n else\n # Get the first character after the backslash.\n escaped_char = @source[@escape_s].chr\n\n if literal.munge_escape? escaped_char\n # If this particular literal uses this character as an opening\n # or closing delimiter, it is an escape sequence for that\n # particular character. Write it without the backslash.\n\n if literal.regexp? && escaped_char == '\\\\'\n # Regular expressions should include backslashes in their escaped\n # form.\n literal.extend_string(tok, @ts, @te)\n else\n literal.extend_string(escaped_char, @ts, @te)\n end\n else\n # It does not. So this is an actual escape sequence, yay!\n # Two things to consider here.\n #\n # 1. 
The `escape' rule should be pure and so won't raise any\n # errors by itself. Instead, it stores them in lambdas.\n #\n # 2. Non-interpolated literals do not go through the aforementioned\n # rule. As \\\\ and \\' (and variants) are munged, the full token\n # should always be written for such literals.\n\n @escape.call if @escape.respond_to? :call\n\n if literal.regexp?\n # Regular expressions should include escape sequences in their\n # escaped form. On the other hand, escaped newlines are removed.\n literal.extend_string(tok.gsub(\"\\\\\\n\", ''), @ts, @te)\n else\n literal.extend_string(@escape || tok, @ts, @te)\n end\n end\n end\n end\n\t\tend\n\twhen 302 then\n# line 670 \"lib/parser/lexer.rl\"\n\t\tbegin\n @escape = @source[p - 1].chr \t\tend\n# line 654 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord & 0x9f)\n \t\tend\n# line 658 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n @escape = encode_escape(@escape[0].ord | 0x80)\n \t\tend\n# line 1642 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n # Show an error if memorized.\n @escape.call if @escape.respond_to? :call\n\n value = @escape || tok(@ts + 1)\n\n if version?(18)\n emit(:tINTEGER, value[0].ord)\n else\n emit(:tCHARACTER, value)\n end\n\n @cs = 739; \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 388 then\n# line 1902 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 16; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 382 then\n# line 1903 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 385 then\n# line 1904 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 379 then\n# line 1905 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 2; @num_digits_s = p \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 391 then\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? 
&& @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 361 then\n# line 1907 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1909 \"lib/parser/lexer.rl\"\n\t\tbegin\n @te = p\np = p - 1; begin \n digits = tok(@num_digits_s, @num_suffix_s)\n\n if digits.end_with? '_'\n diagnostic :error, :trailing_in_number, { :character => '_' },\n range(@te - 1, @te)\n elsif digits.empty? && @num_base == 8 && version?(18)\n # 1.8 did not raise an error on 0o.\n digits = \"0\"\n elsif digits.empty?\n diagnostic :error, :empty_numeric\n elsif @num_base == 8 && (invalid_idx = digits.index(/[89]/))\n invalid_s = @num_digits_s + invalid_idx\n diagnostic :error, :invalid_octal, nil,\n range(invalid_s, invalid_s + 1)\n end\n\n if version?(18, 19, 20)\n emit(:tINTEGER, digits.to_i(@num_base))\n p = @num_suffix_s - 1\n else\n @num_xfrm.call(digits.to_i(@num_base))\n end\n \tbegin\n\t\tp += 1\n\t\t_goto_level = _out\n\t\tnext\n\tend\n\n end\n\t\tend\n\twhen 25 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 792 \"lib/parser/lexer.rl\"\n\t\tbegin\n\n # After every heredoc was parsed, @herebody_s contains the\n # position of next token after all heredocs.\n if @herebody_s\n p = @herebody_s\n @herebody_s = nil\n end\n \t\tend\n# line 1380 \"lib/parser/lexer.rl\"\n\t\tbegin\n tm = p \t\tend\n# line 1381 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 52;\t\tend\n\twhen 398 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1941 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 121;\t\tend\n\twhen 394 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1906 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 10; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1941 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 121;\t\tend\n\twhen 364 then\n# line 1 \"NONE\"\n\t\tbegin\n @te = p+1\n\t\tend\n# line 1907 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_base = 8; @num_digits_s = @ts \t\tend\n# line 1908 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_suffix_s = p \t\tend\n# line 593 \"lib/parser/lexer.rl\"\n\t\tbegin\n @num_xfrm = lambda { |value| emit(:tINTEGER, value) } \t\tend\n# line 1941 \"lib/parser/lexer.rl\"\n\t\tbegin\n @act = 121;\t\tend\n# line 20641 \"lib/parser/lexer.rb\"\n\tend\n\tend\n\tend\n\tif _goto_level <= _again\n\tcase _lex_to_state_actions[ @cs] \n\twhen 73 then\n# line 1 \"NONE\"\n\t\tbegin\n @ts = nil;\t\tend\n# line 20651 \"lib/parser/lexer.rb\"\n\tend\n\n\tif @cs == 0\n\t\t_goto_level = 
_out\n\t\tnext\n\tend\n\tp += 1\n\tif p != pe\n\t\t_goto_level = _resume\n\t\tnext\n\tend\n\tend\n\tif _goto_level <= _test_eof\n\tif p == eof\n\tif _lex_eof_trans[ @cs] > 0\n\t\t_trans = _lex_eof_trans[ @cs] - 1;\n\t\t_goto_level = _eof_trans\n\t\tnext;\n\tend\n\tend\n\n\tend\n\tif _goto_level <= _out\n\t\tbreak\n\tend\nend\n\tend\n\n# line 261 \"lib/parser/lexer.rl\"\n # %\n\n @p = p\n\n if @token_queue.any?\n @token_queue.shift\n elsif @cs == self.class.lex_error\n [ false, [ '$error', range(p - 1, p) ] ]\n else\n [ false, [ '$eof', range(p, p) ] ]\n end\n end", "def rewind(pos = self.last_position)\n self.position = pos\n end", "def peek\n end", "def peek\n end", "def rewind\n @pos = 0\n self\n end", "def rewind\n @pos = 0\n end", "def rewind\n @pos = 0\n end", "def peek\n @front\n end", "def cursor_forward\n if @curpos < @buffer.length \n if addcol(1)==-1 # go forward if you can, else scroll\n @pcol += 1 if @pcol < @width \n end\n @curpos += 1\n end\n # $log.debug \" crusor FORWARD cp:#{@curpos} pcol:#{@pcol} b.l:#{@buffer.length} d_l:#{@display_length} fc:#{@form.col}\"\n end", "def peek\n @tokens.at(@current)\n end", "def advance \n \n @pos += 1\n if @pos > @text.length - 1\n @currentChar = nil\n else \n @currentChar = @text[@pos]\n end\n \n end", "def peek\n @tokens[@pos]\n end", "def advance\n @lookahead = next_token()\n end", "def advance\n buffer_available_data\n @cursor = @buffer.length\n end", "def consume_seek(pos)\n orig = consume_pos\n if pos < 0\n pos = consume_pos + pos\n pos = 0 if pos < 0\n end\n @consume_pos = pos\n orig\n end", "def peek(*args)\n with_saved_pos { read(*args) }\n end", "def rewind\n @cursor = 0\n end", "def shift\n ret = @tokens.shift\n get_next_tokens\n ret\n end", "def consume()\n la(1)\n return @lookahead.shift\n end", "def peek\n @tokens[@position]\n end", "def peek\n @tok ||= read_token\n end", "def scroll_forward\n @oldindex = @current_index\n @current_index += @scrollatrows\n @prow = @current_index - @scrollatrows\n end", "def move_to_start\n @cursor = 0\n end", "def move_to_start\n @cursor = 0\n end", "def fwd_lit_nolt(hint)\n while lit = next_input_element(hint) and lit.ws?\n end\n lit\n end", "def next_token; @stack.shift; end", "def scroll_forward\n #@oldindex = @current_index\n @current_index += @scrollatrows\n @prow = @current_index - @scrollatrows\n end", "def read_and_advance(length)\n data = nil\n cursor_start = current.position\n case current.direction\n when :forward\n data = @buffer.slice(current.position, length)\n adjust(length)\n when :backward\n adjust(-length)\n data = @buffer.slice(current.position, length)\n end\n\n record_trace(cursor_start, data.bytes, current.name)\n data\n end", "def peek # :nodoc:\n @tokens.peek\n end", "def rewind\n @offset = 0\n end", "def peek(src)\n ctx = src.is_a?(Context) ? 
src : Context.new(src)\n starting_pos = ctx.bio.pos\n begin\n parse ctx\n ensure\n ctx.bio.restore_to starting_pos\n end\n end", "def advance\n goto((@current_frame + 1) % @frames)\n end", "def advance_position\n matched_fragment = @str[@[email protected]]\n new_lines = matched_fragment.count(\"\\n\")\n if new_lines > 0\n characters_after_last_newline = matched_fragment.size - matched_fragment.rindex(\"\\n\") - 1\n [@line + new_lines, 1 + characters_after_last_newline]\n else\n [@line, @char + matched_fragment.size]\n end\n end", "def from_left(cur)\n\t\tmove(cur, 0, -1)\n\tend", "def forward\n history_navigate(delta: 1)\n end", "def peek\n @tape[-1]\n end", "def sol_update_peek\n @peek = gets\n if (@peek.nil?)\n close\n else\n @offset += 1\n end\n end", "def advance\n @current += 1 unless at_end?\n return previous\n end", "def rewind()\n @ole.Rewind()\n end", "def cursor_forward\n $multiplier = 1 if $multiplier == 0\n if @curpos < @cols\n @curpos += $multiplier\n if @curpos > @cols\n @curpos = @cols\n end\n @repaint_required = true\n end\n $multiplier = 0\n end", "def peek\r\n @peek_buffer ||= get unless eoln?\r\n end", "def step_forward(amount = 1)\n @token_index += amount\n current_token\n end", "def decrement_position\n return unless in_list?\n set_list_position(current_position - 1)\n end", "def tell; @next_block_pos end", "def move_forward\n Proc.new { | spaces | \"#{ @column }#{ forward( spaces ) }\" }\n end", "def left(n = 1)\n @cursor = [0, @cursor - n].max\n end", "def left(n = 1)\n @cursor = [0, @cursor - n].max\n end", "def move_to_next_line()\r\n while @seek_ptr < @len && @fileBuf.at(@seek_ptr) != \"\\n\"\r\n @seek_ptr = @seek_ptr + 1\r\n end\r\n end", "def prev_char\n self.cursor -= 1\n end", "def lshift!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 43 )\n\n type = LSHIFT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 164:10: '<<'\n match( \"<<\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 43 )\n\n end", "def forward_word\n $multiplier = 1 if !$multiplier or $multiplier == 0\n line = @current_index\n buff = @list[line]\n pos = @curpos\n $multiplier.times {\n found = buff.index(/[[:punct:][:space:]]/, pos)\n if !found\n # if not found, we've lost a counter\n line += 1 # unless eof\n buff = @list[line]\n pos = 0\n else\n pos = found + 1\n end\n $log.debug \" forward_word: pos #{pos} line #{line} buff: #{buff}\"\n }\n @current_index = line\n @curpos = pos\n @buffer = @list[@current_index]\n set_form_row\n set_form_col pos\n @repaint_required = true\n end", "def prev_word\n line = get.reverse\n pos = get.size - cursor\n\n return unless md = line.match(BACKWARD_WORD, pos)\n self.cursor = (line.size - md.offset(0).last)\n end", "def goto_start\n @oldindex = @current_index\n $multiplier ||= 0\n if $multiplier > 0\n goto_line $multiplier\n return\n end\n @current_index = 0\n @curpos = @pcol = @prow = 0\n @prow = 0\n $multiplier = 0\n end", "def peek()\n @stack.peek\n end", "def lookahead(ll_index = 0)\n retun nil if @current + ll_index >= tokens.size\n tokens[@current + ll_index]\n end", "def seek(index)\n if (index <= @p)\n @p = index # just jump; don't update stream state (line, ...)\n return\n end\n # seek forward, consume until p hits index\n while (@p < index)\n consume\n end\n end", "def position_for_next_harvest()\n turn_right()\n move()\n turn_right()\n end", "def next_word\n 
return unless md = get.match(FORWARD_WORD, cursor)\n self.cursor = md.offset(0).last\n end", "def peek(position = nil)\n raise \"No block given\" unless block_given?\n\n push(position)\n result = yield(self)\n pop\n result\n end", "def perform_continue( position, action, estream, new_branch_info = nil )\n node = position.determinant()\n \n estream.puts \"===> SHIFT #{node.description} AND GOTO #{action.to_state.number}\" if estream\n next_position = position.push( node, action.to_state )\n\n #\n # With the next_position chosen, we need to chain forward the branch information.\n # If one is supplied, we use it. Otherwise, we copy forward one from the previous\n # position: either the context info if this action disambiguates the parse, or\n # the existing one otherwise.\n \n if new_branch_info.exists? then\n next_position.branch_info = new_branch_info\n elsif position.branch_info.exists? then\n next_position.branch_info = position.branch_info\n end \n \n return next_position \n end", "def peek!(*_arg0); end", "def advance(distance = 1)\n @position.x += distance\n end", "def move_left\n @memory_position -= 1\n @memory_position = 0xfffffe if @memory_position < 0\n end", "def rewind() end", "def rewind() end", "def rewind() end", "def rewind() end", "def consume_pos\n @consume_pos ||= 0\n end", "def rewind( before_token )\n @lexer.reset_position( before_token.start_position )\n @lookahead.clear\n end", "def seek_forward(amount = opts[:seek_size])\n seek_by(amount)\n end", "def next_piece\n @current_block = MyPiece.next_piece(self)\n @current_pos = nil\n end", "def move_ahead\n @posx, @posy = coords_ahead\n activate\n end", "def advance_tail\n\t\t\t@tail_index += 1\n\t\t\t@tail_index = 0 if @tail_index == @queue.size\n\t\tend", "def jump_to_line l\n l = l.clamp 0, num_lines - 1\n return if @topline == l\n @topline = l\n @botline = [l + buffer.content_height, num_lines].min\n buffer.mark_dirty!\n end", "def advance\n @level = -1\n loop do\n read_line\n return if @line.nil?\n break if @line !~ /^\\s*$/\n end\n @level, @tag, @data = @line.chomp.split(/\\s+/, 3)\n @level = @level.to_i\n end", "def rewind\n @history_idx = -1\n end", "def from_left; end" ]
[ "0.67667997", "0.65343904", "0.6196432", "0.6132759", "0.61087483", "0.6024115", "0.60034144", "0.59756315", "0.59578156", "0.59487754", "0.5940611", "0.5933561", "0.5921195", "0.59079677", "0.59079677", "0.58957845", "0.5888137", "0.5881644", "0.5878967", "0.5874191", "0.5874191", "0.5874191", "0.58223045", "0.5801777", "0.578759", "0.57850015", "0.57850015", "0.5779458", "0.5761503", "0.5761503", "0.5741279", "0.5719477", "0.56993955", "0.5689658", "0.56787235", "0.5677947", "0.567387", "0.5667543", "0.5660954", "0.5660146", "0.56373304", "0.5631272", "0.5626886", "0.56123775", "0.5605794", "0.5575999", "0.5575999", "0.5571809", "0.55583036", "0.55573106", "0.54955596", "0.5482825", "0.54793286", "0.54734313", "0.54722446", "0.54578924", "0.54451656", "0.5429637", "0.5426666", "0.54230094", "0.5405866", "0.53887206", "0.53821516", "0.53800535", "0.53702223", "0.536134", "0.5360567", "0.5346654", "0.53453505", "0.53453505", "0.5329983", "0.5321469", "0.5315848", "0.5309933", "0.53059477", "0.53022987", "0.5301358", "0.52919495", "0.52916723", "0.5286215", "0.5284647", "0.5280468", "0.52730066", "0.52609634", "0.52473724", "0.52472216", "0.5243224", "0.5243224", "0.5243224", "0.5243224", "0.5241601", "0.5241094", "0.52227336", "0.52177656", "0.521126", "0.5210156", "0.520615", "0.5205441", "0.5205305", "0.5200829" ]
0.72775227
0
fetch next literal. position is forwarded. white spaces and line terminators are skipped and ignored.
def fwd_lit(hint) while lit = next_input_element(hint) and (lit.ws? or lit.lt?) end lit end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n 
statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = 
@adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? 
statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def literal(buffer)\n reader = lambda { |string = ''|\n buffer.major_mode.read(1) do |event|\n if unicode = event.unicode\n string += unicode # copy\n buffer.message string.inspect\n\n case result = literal_handle(buffer, string)\n when nil\n reader.call(string)\n when String\n literal_insert(buffer, result)\n end\n else\n return # Unverrichteter Dinge\n end\n end\n }\n\n reader.call\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n 
return Token.new [x, x,position]\n end\n end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def next\n ret = peek_next\n @str.slice! @last_re if ret.type != :eos\n\n ret\n end", "def next_token; end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! 
no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def literal; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. '7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? 
|| @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? || !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? /^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? 
|| token.kind_of?(String) \n @pointer += 1\n token\n end", "def string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 42)\n\n type = STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 486:4: '\\\\'' LITERAL_CHAR ( LITERAL_CHAR )* '\\\\''\n match(?\\')\n literal_char!\n # at line 486:22: ( LITERAL_CHAR )*\n loop do #loop 5\n alt_5 = 2\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, 0xFFFF)) \n alt_5 = 1\n\n end\n case alt_5\n when 1\n # at line 486:22: LITERAL_CHAR\n literal_char!\n\n else\n break #loop 5\n end\n end\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 42)\n\n end", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def next_token\n tokens.shift\n end", "def next\n token = next_token\n token = next_token while token&.empty?\n token\n end", "def next\n\t\tif @next_token\n\t\t\ttoken = @next_token\n\t\t\t@next_token = nil\n\t\t\treturn token\n\t\telse\n\t\t\ttoken = read_token\n\t\t\treturn token\n\t\tend\n\tend", "def push_literal\n <<-CODE\n next_int;\n t1 = cpu_current_literals(state, c);\n t2 = fast_fetch(t1, _int);\n stack_push(t2);\n CODE\n end", "def process_lit(exp)\n # TODO: audit against obfuscator\n value = exp.shift\n case value\n when Integer then\n return \"LONG2NUM(#{value})\"\n when Float then\n return \"rb_float_new(#{value})\"\n when Symbol\n return \"ID2SYM(rb_intern(#{value.to_s.inspect}))\"\n when Range\n f = process_lit [ value.first ]\n l = process_lit [ value.last ]\n x = 0\n x = 1 if value.exclude_end?\n\n return \"rb_range_new(#{f}, #{l}, #{x})\"\n when Regexp\n src = value.source\n return \"rb_reg_new(#{src.inspect}, #{src.size}, #{value.options})\"\n else\n raise \"Bug! 
no: Unknown literal #{value}:#{value.class}\"\n end\n return nil\n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def next()\n return \" \" unless has_next()\n if(@count <= 0)\n @char = @compressed_string[@i]\n @i += 1\n @count = get_count()\n end\n @count -= 1\n return @char\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def next_token\n @tokens.shift\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def next_token\n \n # Early return if there is nothing to be read. This means we've reached the end of the file.\n \n unless @file[@pos]\n return nil\n end\n \n # This is the token that will be returned.\n token = Compiler::Token.new\n \n # Initializes a new instance of the automaton.\n automaton = Automaton.new\n \n # Will be set inside the loop, if necessary.\n increment_next = false\n \n # Will be set inside the loop. Marks whether we've reached the end of the file.\n eof = false\n \n # Build a new token while we don't have a new word yet and isn't in the failed state\n while ((automaton.state != :A || automaton.word.empty?) && automaton.state != :failed)\n \n # The next input for the automaton\n char = @file[@pos]\n \n if char\n \n # Moves the pointer to the next char\n @pos += 1\n \n automaton.transition(char)\n \n # While the automaton hasn't started to build a new word yet, increments the line and column numbers.\n # In this phase, we're just skipping blank characters\n if automaton.word.empty?\n if increment_next\n if char == \"\\n\"\n increment_next = true\n else\n increment_next = false\n end\n @line += 1\n @column = 0\n elsif char == \"\\n\"\n @column += 1\n increment_next = true\n else\n @column += 1\n end\n end\n \n else\n eof = true\n puts \"breaking\"\n break\n end\n end\n \n \n \n if eof\n automaton.transition(\"\\n\")\n else\n @pos -= 1\n end\n \n if (automaton.type == :identifier) && (Compiler.reserved_words.is_reserved?(automaton.word))\n token.type = :reserved_word\n else\n token.type = automaton.type\n end\n \n token.value = automaton.word\n token.line = @line\n token.column = @column\n \n return token\n \n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def next!() end", "def consume!\n empty_line ||\n name_token ||\n comment_token ||\n whitespace_token ||\n line_token ||\n heredoc_token ||\n string_token ||\n number_token ||\n regex_token ||\n literal_token\n end", "def next_token; @stack.shift; end", "def racc_read_token(t, tok, val); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def set_literal\n <<-CODE\n next_int;\n tuple_put(state, cpu_current_literals(state, c), _int, stack_top());\n CODE\n end", "def match(ptr, depth = 0)\n case c = ptr.peek(1)\n when '\"', '`'\n start_pos = ptr.pos\n ptr.pos += 1\n AST.new(:string, value: ptr.scan_until(/#{c}/).chop,\n attributes: { type: char_to_type(c) },\n pos: start_pos)\n end\n end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n 
skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return Token.new(EOF,'EOF') \n \n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def next\n displacement = @file.gets.try(:chomp).try(:to_f)\n return nil unless displacement\n\n ret = @curr_val\n @curr_val += displacement\n ret\n end", "def read_character\n lit = read_literal\n\n return \" \" if lit.empty? && peek_char == \" \"\n CHARACTERS.fetch(lit.downcase) do\n # Return just the first character\n unread(lit[1..-1])\n lit[0,1]\n end\n end", "def next_token\n\t\[email protected]_token\n\tend", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when text = ss.scan(/#{DIGIT}/) then\n action { [:DIGIT, text.to_i] }\n when text = ss.scan(/#{ADDITION}/) then\n action { [:ADDITION, text] }\n when text = ss.scan(/#{SUBSTRACTION}/) then\n action { [:SUBSTRACTION, text] }\n when text = ss.scan(/#{MULTIPLICATION}/) then\n action { [:MULTIPLICATION, text] }\n when text = ss.scan(/#{DIVISION}/) then\n action { [:DIVISION, text] }\n when text = ss.scan(/#{OPENING_PARANTHESIS}/) then\n action { [:OPENING_PARANTHESIS, text] }\n when text = ss.scan(/#{CLOSING_PARANTHESIS}/) then\n action { [:CLOSING_PARANTHESIS, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def peek\n @tokens[@position]\n end", "def get\n @source_index += 1\n\n # Maintain line count.\n prev_char = @source_text[@source_index - 1]\n if @source_index.positive? 
&& prev_char == \"\\n\"\n @line_index += 1\n @col_index = -1\n end\n\n @col_index += 1\n char = if @source_index > @last_index\n # Read past the end of source text.\n END_MARK\n else\n @source_text[@source_index]\n end\n Character.new(char, @line_index, @col_index, @source_index, @source_text)\n end", "def get_token\n @tokenbuf << read_token if @tokenbuf.length == 0\n return @tokenbuf.shift\n end", "def run(source, until_token = :invalid, token_count = nil)\n @at_end = false\n @source = source\n @reader = source.each_char\n\n read_next()\n\n while token_count == nil || token_count > 0\n skip_whitespace()\n current = @marker.character\n break unless current\n\n token = Token.new\n token.kind = :invalid\n token.from = @marker.source_index\n token.position = @marker.position.dup\n\n case current\n when ?\", ?'\n read_string(token)\n\n when ?0\n case peek_next()\n when ?x, ?X, ?b, ?B then read_base_number(token)\n else read_number(token)\n end\n\n when ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_number(token)\n\n # dot, double dot, triple dot, and floats beginning with a dot\n when ?.\n token.kind = :dot\n case peek_next()\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9 then read_number(token)\n when ?.\n read_next()\n token.kind = :double_dot\n\n if peek_next() == ?.\n read_next()\n token.kind = :triple_dot\n end\n\n token.value = Token::DESCRIPTORS[token.kind]\n else\n token.value = Token::DESCRIPTORS[token.kind]\n end\n\n when ?_, ?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j, ?k, ?l, ?m, ?n, ?o, ?p,\n ?q, ?r, ?s, ?t, ?u, ?v, ?w, ?x, ?y, ?z, ?A, ?B, ?C, ?D, ?E, ?F, ?G, ?H,\n ?I, ?J, ?K, ?L, ?M, ?N, ?O, ?P, ?Q, ?R, ?S, ?T, ?U, ?V, ?W, ?X, ?Y, ?Z\n read_word(token)\n\n when ?\\n\n token.value = current\n token.kind = :newline\n\n when ??, ?#, ?@, ?$, ?%, ?(, ?), ?[, ?], ?{, ?}, ?^, ?~, ?`, ?\\\\, ?,, ?;\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?=, ?|, ?&, ?:, ?+, ?*\n current << read_next() if peek_next() == current\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?!\n current << read_next() if peek_next() == ?=\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?>, ?<\n case peek_next()\n when ?=, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?-\n case peek_next()\n when ?>, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?/\n case peek_next()\n when ?/ then read_line_comment(token)\n when ?* then read_block_comment(token)\n else\n token.value = Token::DESCRIPTORS[token.kind = :slash]\n read_next()\n end\n\n end # case current\n\n token.to = @marker.source_index\n last_kind = token.kind\n if !(@skip_comments && token.comment?) 
&& !(@skip_newlines && token.newline?)\n if last_kind != :invalid\n @tokens << token\n yield token if block_given?\n else\n raise RuntimeError, \"#{token.position} Invalid token: #{token.inspect}\"\n end\n end\n\n break if until_token == last_kind\n\n read_next()\n token_count -= 1 unless token_count.nil?\n end # while current && token_count > 0\n\n @source = nil\n @reader = nil\n\n self\n end", "def string_literal\n # StringLiteral ::\n # \" DoubleStringCharactersopt \"\n # ' SingleStringCharactersopt '\n #\n # DoubleStringCharacters ::\n # DoubleStringCharacter DoubleStringCharactersopt\n #\n # SingleStringCharacters ::\n # SingleStringCharacter SingleStringCharactersopt\n #\n # DoubleStringCharacter ::\n # SourceCharacter but not one of \" or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n # SingleStringCharacter ::\n # SourceCharacter but not one of ' or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n if (code = @codes[@pos]) == 0x27 #'\n term = 0x27\n elsif code == 0x22 #\"\n term = 0x22\n else\n return nil\n end\n @pos += 1\n pos0 = @pos\n\n str = []\n while (code = @codes[@pos])\n if code.nil?\n raise ParseError.new(\"no `#{term}' at end of string\", self)\n elsif line_terminator?(code)\n raise ParseError.new(\"string has line terminator in body\", self)\n elsif code == 0x5c #\\\n @pos += 1\n str.push(escape_sequence)\n elsif code == term\n @pos += 1\n return ECMA262::ECMA262String.new(str.compact.pack(\"U*\"))\n else\n @pos += 1\n str.push(code)\n end\n end\n nil\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n if ss.check(/\\n/) then\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil, :option, :inner, :start, :macro, :rule, :group then\n case\n when ss.skip(/options?.*/) then\n [:state, :option]\n when ss.skip(/inner.*/) then\n [:state, :inner]\n when ss.skip(/macros?.*/) then\n [:state, :macro]\n when ss.skip(/rules?.*/) then\n [:state, :rule]\n when ss.skip(/start.*/) then\n [:state, :start]\n when ss.skip(/end/) then\n [:state, :END]\n when ss.skip(/\\A((?:.|\\n)*)class ([\\w:]+.*)/) then\n action { [:class, *matches] }\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/\\s*(\\#.*)/) then\n action { [:comment, text] }\n when (state == :option) && (ss.skip(/\\s+/)) then\n # do nothing\n when (state == :option) && (text = ss.scan(/stub/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/debug/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/do_parse/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/lineno/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/column/i)) then\n action { [:option, text] }\n when (state == :inner) && (text = ss.scan(/.*/)) then\n action { [:inner, text] }\n when (state == :start) && (text = ss.scan(/.*/)) then\n action { [:start, text] }\n when (state == :macro) && (ss.skip(/\\s+(\\w+)\\s+#{RE}/o)) then\n action { [:macro, *matches] }\n when (state == :rule) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:rule, *matches] }\n when (state == :rule) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*\\|\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:group, *matches] }\n when (state == :group) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:groupend, *matches] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :END then\n case\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/.*/) then\n action { [:end, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? 
|| (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def next_word\n return unless md = get.match(FORWARD_WORD, cursor)\n self.cursor = md.offset(0).last\n end", "def next()\n @index += 1\n @string[@index...(@index+1)]\n end", "def next_token\n @current_token = @lexer.next_token\n end", "def next() end", "def next() end", "def next_char\n @pos += 1\n if (c = @source[@pos..@pos]) == BACKSLASH\n @pos += 1\n [true, @source[@pos..@pos]]\n else\n [false, c]\n end\n end", "def get_next\n return if eof?\n\n @buffer << @io.gets if @buffer.empty?\n\n until @io.eof?\n line = @io.gets\n next unless line\n\n if @parser.start_new?(line) || @buffer.empty?\n @buffer << line\n break\n else\n @buffer.last << line\n end\n end\n\n return if @buffer.empty?\n @parser.parse(@buffer.slice!(0)) || self.get_next\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def peek\n @tokens[@pos]\n end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . )* '>>'\n match(\"<<\")\n # at line 500:9: ( . 
)*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def _reduce_279(val, _values, result)\n result = lexer.line\n \n result\nend", "def get_char\n @look = @expression[@number]\n @number +=1\nend", "def next_char\n self.cursor += 1\n end", "def next()\n if has_next()\n @strings[0][1]-=1\n c = @strings[0][0]\n while has_next() and @strings[0][1] == 0\n @strings.shift\n end\n return c\n end\n return \" \"\n end", "def get_procedure_literal\r\n save, @buffer = @buffer, \"\"\r\n open_procedure_literal\r\n\r\n begin\r\n token = get_procedure_token\r\n due_token(token)\r\n end until token.has_tag?(:end)\r\n\r\n close_procedure_literal\r\n (_, @buffer = @buffer, save)[0]\r\n end", "def next_token\n return [false, false] if @src.eos?\n# p @src.rest if @yydebug\n if ret = @src.scan(EM_OPEN_RE)\n @pre << ret\n [:EM_OPEN, ret]\n elsif ret = @src.scan(EM_CLOSE_RE)\n @pre << ret\n [:EM_CLOSE, ret]\n elsif ret = @src.scan(CODE_OPEN_RE)\n @pre << ret\n [:CODE_OPEN, ret]\n elsif ret = @src.scan(CODE_CLOSE_RE)\n @pre << ret\n [:CODE_CLOSE, ret]\n elsif ret = @src.scan(VAR_OPEN_RE)\n @pre << ret\n [:VAR_OPEN, ret]\n elsif ret = @src.scan(VAR_CLOSE_RE)\n @pre << ret\n [:VAR_CLOSE, ret]\n elsif ret = @src.scan(KBD_OPEN_RE)\n @pre << ret\n [:KBD_OPEN, ret]\n elsif ret = @src.scan(KBD_CLOSE_RE)\n @pre << ret\n [:KBD_CLOSE, ret]\n elsif ret = @src.scan(INDEX_OPEN_RE)\n @pre << ret\n [:INDEX_OPEN, ret]\n elsif ret = @src.scan(INDEX_CLOSE_RE)\n @pre << ret\n [:INDEX_CLOSE, ret]\n elsif ret = @src.scan(REF_OPEN_RE)\n @pre << ret\n [:REF_OPEN, ret]\n elsif ret = @src.scan(REF_CLOSE_RE)\n @pre << ret\n [:REF_CLOSE, ret]\n elsif ret = @src.scan(FOOTNOTE_OPEN_RE)\n @pre << ret\n [:FOOTNOTE_OPEN, ret]\n elsif ret = @src.scan(FOOTNOTE_CLOSE_RE)\n @pre << ret\n [:FOOTNOTE_CLOSE, ret]\n elsif ret = @src.scan(VERB_OPEN_RE)\n @pre << ret\n [:VERB_OPEN, ret]\n elsif ret = @src.scan(VERB_CLOSE_RE)\n @pre << ret\n [:VERB_CLOSE, ret]\n elsif ret = @src.scan(BAR_RE)\n @pre << ret\n [:BAR, ret]\n elsif ret = @src.scan(QUOTE_RE)\n @pre << ret\n [:QUOTE, ret]\n elsif ret = @src.scan(SLASH_RE)\n @pre << ret\n [:SLASH, ret]\n elsif ret = @src.scan(BACK_SLASH_RE)\n @pre << ret\n [:BACK_SLASH, ret]\n elsif ret = @src.scan(URL_RE)\n @pre << ret\n [:URL, ret]\n elsif ret = @src.scan(OTHER_RE)\n @pre << ret\n [:OTHER, ret]\n else\n ret = @src.rest\n @pre << ret\n @src.terminate\n [:OTHER, ret]\n end\nend", "def literal?(node); end" ]
[ "0.66224873", "0.6524835", "0.6495376", "0.6463879", "0.63447595", "0.62518346", "0.6235981", "0.61942846", "0.6128024", "0.61243206", "0.60881156", "0.6084366", "0.6065472", "0.6055462", "0.60457474", "0.6039622", "0.6003619", "0.5981759", "0.5981759", "0.5981759", "0.5936302", "0.59323156", "0.5919721", "0.5851749", "0.5851749", "0.5851749", "0.58461386", "0.5845553", "0.5836401", "0.5792651", "0.57877076", "0.5773331", "0.57722837", "0.5770015", "0.5765936", "0.5709608", "0.57078785", "0.5687482", "0.56861675", "0.5649122", "0.5642091", "0.56407034", "0.56383836", "0.56367373", "0.5634531", "0.56328267", "0.56261027", "0.56212807", "0.55955815", "0.5585243", "0.5585243", "0.5585243", "0.55727047", "0.55708855", "0.55587757", "0.55374855", "0.55374855", "0.55374855", "0.55374855", "0.55242455", "0.55242455", "0.55242455", "0.55242455", "0.5523332", "0.5517912", "0.5505392", "0.55031437", "0.5502992", "0.55029136", "0.550265", "0.5491453", "0.5486149", "0.5480158", "0.5475586", "0.5475586", "0.5475586", "0.54736364", "0.54736364", "0.54736364", "0.546839", "0.546669", "0.54614264", "0.54614264", "0.54614264", "0.5457621", "0.54525864", "0.5450547", "0.5426699", "0.5426699", "0.5426697", "0.5411475", "0.54060286", "0.54053396", "0.5403521", "0.54027617", "0.5393605", "0.53933465", "0.5392465", "0.5390782", "0.5373531", "0.5369645" ]
0.0
-1
Fetch the next literal; the position is forwarded. White spaces are skipped and ignored, but line terminators are not ignored.
# Fetch the next literal, advancing the position past any whitespace
# literals; the first non-whitespace literal (including a line
# terminator, which is deliberately not skipped) is returned, or nil
# at end of input.
def fwd_lit_nolt(hint)
  while lit = next_input_element(hint) and lit.ws?
  end
  lit
end
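A minimal usage sketch for the document above (hypothetical: the Lexer class, the lt? predicate for line terminators, and the handler methods are illustrative assumptions, not part of this record):

# Sketch only. Assumes a lexer exposing fwd_lit_nolt and literal objects
# that respond to lt? for line terminators; these names are assumptions.
lexer = Lexer.new(source)
while (lit = lexer.fwd_lit_nolt(nil))
  if lit.lt?               # line terminators survive the whitespace skip
    note_line_break(lit)   # e.g. bump a line counter
  else
    emit_token(lit)
  end
end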
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_lit\n case l.front.type\n when :str then parse_str_lit\n when :chr then parse_char_lit\n when :num then parse_num_lit\n else\n error \"expected a literal\"\n end\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 22 )\n\n\n value = nil\n\n\n a = nil\n\n\n begin\n # at line 142:3: (a= INTEGER |a= FLOAT |a= BOOLEAN |a= STRING |a= CHAR )\n alt_38 = 5\n case look_38 = @input.peek( 1 )\n when INTEGER then alt_38 = 1\n when FLOAT then alt_38 = 2\n when BOOLEAN then alt_38 = 3\n when STRING then alt_38 = 4\n when CHAR then alt_38 = 5\n else\n @state.backtracking > 0 and raise( ANTLR3::Error::BacktrackingFailed )\n\n\n\n raise NoViableAlternative( \"\", 38, 0 )\n\n end\n case alt_38\n when 1\n # at line 142:5: a= INTEGER\n a = match( INTEGER, TOKENS_FOLLOWING_INTEGER_IN_literal_1037 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Int, a.text) \n # <-- action\n end\n\n\n when 2\n # at line 143:5: a= FLOAT\n a = match( FLOAT, TOKENS_FOLLOWING_FLOAT_IN_literal_1047 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Float, a.text) \n # <-- action\n end\n\n\n when 3\n # at line 144:5: a= BOOLEAN\n a = match( BOOLEAN, TOKENS_FOLLOWING_BOOLEAN_IN_literal_1059 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Bool, a.text) \n # <-- action\n end\n\n\n when 4\n # at line 145:5: a= STRING\n a = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1069 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:String, a.text) \n # <-- action\n end\n\n\n when 5\n # at line 146:5: a= CHAR\n a = match( CHAR, TOKENS_FOLLOWING_CHAR_IN_literal_1080 )\n\n # syntactic predicate action gate test\n if @state.backtracking == 0\n # --> action\n value = LiteralEval.new(:Char, a.text) \n # <-- action\n end\n\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 22 )\n\n\n end\n\n return value\n end", "def next_token\n result = peek_token\n @start = @finish\n return result if @start >= @expr.length\n\n if @expr[@start].numeric?\n @finish = @start + 1\n while @finish < @expr.length && @expr[@finish].to_s.numeric?\n @finish = @finish + 1\n end\n else\n @finish = @start + 1\n end\n result\n end", "def _literal\n\n _save = self.pos\n while true # choice\n _tmp = apply(:_number)\n break if _tmp\n self.pos = _save\n _tmp = apply(:_string)\n break if _tmp\n self.pos = _save\n break\n end # end choice\n\n set_failed_rule :_literal unless _tmp\n return _tmp\n end", "def literal\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 33 )\n return_value = LiteralReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal267 = nil\n __IVAR268__ = nil\n __ID269__ = nil\n string_literal270 = nil\n string_literal271 = nil\n string_literal272 = nil\n string_literal273 = nil\n __NUMBER274__ = nil\n __STRING275__ = nil\n __DOC276__ = nil\n __REGEX277__ = nil\n __ARRAY278__ = nil\n __OBJECT280__ = nil\n string_literal282 = nil\n __ID283__ = nil\n string_literal286 = nil\n argument279 = nil\n property_definition281 = nil\n parameters284 = nil\n 
statement_block285 = nil\n parameters287 = nil\n statement_block288 = nil\n\n tree_for_string_literal267 = nil\n tree_for_IVAR268 = nil\n tree_for_ID269 = nil\n tree_for_string_literal270 = nil\n tree_for_string_literal271 = nil\n tree_for_string_literal272 = nil\n tree_for_string_literal273 = nil\n tree_for_NUMBER274 = nil\n tree_for_STRING275 = nil\n tree_for_DOC276 = nil\n tree_for_REGEX277 = nil\n tree_for_ARRAY278 = nil\n tree_for_OBJECT280 = nil\n tree_for_string_literal282 = nil\n tree_for_ID283 = nil\n tree_for_string_literal286 = nil\n\n begin\n # at line 229:3: ( 'this' | IVAR | ID | 'null' | 'true' | 'false' | 'undefined' | NUMBER | STRING | DOC | REGEX | ^( ARRAY ( argument )* ) | ^( OBJECT ( property_definition )* ) | ^( 'function' ( ID )? parameters statement_block ) | ^( '->' ( parameters )? statement_block ) )\n alt_41 = 15\n case look_41 = @input.peek( 1 )\n when THIS then alt_41 = 1\n when IVAR then alt_41 = 2\n when ID then alt_41 = 3\n when NULL then alt_41 = 4\n when TRUE then alt_41 = 5\n when FALSE then alt_41 = 6\n when UNDEFINED then alt_41 = 7\n when NUMBER then alt_41 = 8\n when STRING then alt_41 = 9\n when DOC then alt_41 = 10\n when REGEX then alt_41 = 11\n when ARRAY then alt_41 = 12\n when OBJECT then alt_41 = 13\n when FUNCTION then alt_41 = 14\n when ARROW then alt_41 = 15\n else\n raise NoViableAlternative( \"\", 41, 0 )\n end\n case alt_41\n when 1\n root_0 = @adaptor.create_flat_list\n\n\n # at line 229:5: 'this'\n _last = @input.look\n string_literal267 = match( THIS, TOKENS_FOLLOWING_THIS_IN_literal_1643 )\n\n tree_for_string_literal267 = @adaptor.copy_node( string_literal267 )\n\n @adaptor.add_child( root_0, tree_for_string_literal267 )\n\n\n\n when 2\n root_0 = @adaptor.create_flat_list\n\n\n # at line 230:5: IVAR\n _last = @input.look\n __IVAR268__ = match( IVAR, TOKENS_FOLLOWING_IVAR_IN_literal_1649 )\n\n tree_for_IVAR268 = @adaptor.copy_node( __IVAR268__ )\n\n @adaptor.add_child( root_0, tree_for_IVAR268 )\n\n\n\n when 3\n root_0 = @adaptor.create_flat_list\n\n\n # at line 231:5: ID\n _last = @input.look\n __ID269__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1655 )\n\n tree_for_ID269 = @adaptor.copy_node( __ID269__ )\n\n @adaptor.add_child( root_0, tree_for_ID269 )\n\n\n\n when 4\n root_0 = @adaptor.create_flat_list\n\n\n # at line 232:5: 'null'\n _last = @input.look\n string_literal270 = match( NULL, TOKENS_FOLLOWING_NULL_IN_literal_1661 )\n\n tree_for_string_literal270 = @adaptor.copy_node( string_literal270 )\n\n @adaptor.add_child( root_0, tree_for_string_literal270 )\n\n\n\n when 5\n root_0 = @adaptor.create_flat_list\n\n\n # at line 233:5: 'true'\n _last = @input.look\n string_literal271 = match( TRUE, TOKENS_FOLLOWING_TRUE_IN_literal_1667 )\n\n tree_for_string_literal271 = @adaptor.copy_node( string_literal271 )\n\n @adaptor.add_child( root_0, tree_for_string_literal271 )\n\n\n\n when 6\n root_0 = @adaptor.create_flat_list\n\n\n # at line 234:5: 'false'\n _last = @input.look\n string_literal272 = match( FALSE, TOKENS_FOLLOWING_FALSE_IN_literal_1673 )\n\n tree_for_string_literal272 = @adaptor.copy_node( string_literal272 )\n\n @adaptor.add_child( root_0, tree_for_string_literal272 )\n\n\n\n when 7\n root_0 = @adaptor.create_flat_list\n\n\n # at line 235:5: 'undefined'\n _last = @input.look\n string_literal273 = match( UNDEFINED, TOKENS_FOLLOWING_UNDEFINED_IN_literal_1679 )\n\n tree_for_string_literal273 = @adaptor.copy_node( string_literal273 )\n\n @adaptor.add_child( root_0, tree_for_string_literal273 )\n\n\n\n when 8\n root_0 = 
@adaptor.create_flat_list\n\n\n # at line 236:5: NUMBER\n _last = @input.look\n __NUMBER274__ = match( NUMBER, TOKENS_FOLLOWING_NUMBER_IN_literal_1685 )\n\n tree_for_NUMBER274 = @adaptor.copy_node( __NUMBER274__ )\n\n @adaptor.add_child( root_0, tree_for_NUMBER274 )\n\n\n\n when 9\n root_0 = @adaptor.create_flat_list\n\n\n # at line 237:5: STRING\n _last = @input.look\n __STRING275__ = match( STRING, TOKENS_FOLLOWING_STRING_IN_literal_1691 )\n\n tree_for_STRING275 = @adaptor.copy_node( __STRING275__ )\n\n @adaptor.add_child( root_0, tree_for_STRING275 )\n\n\n\n when 10\n root_0 = @adaptor.create_flat_list\n\n\n # at line 238:5: DOC\n _last = @input.look\n __DOC276__ = match( DOC, TOKENS_FOLLOWING_DOC_IN_literal_1697 )\n\n tree_for_DOC276 = @adaptor.copy_node( __DOC276__ )\n\n @adaptor.add_child( root_0, tree_for_DOC276 )\n\n\n\n when 11\n root_0 = @adaptor.create_flat_list\n\n\n # at line 239:5: REGEX\n _last = @input.look\n __REGEX277__ = match( REGEX, TOKENS_FOLLOWING_REGEX_IN_literal_1703 )\n\n tree_for_REGEX277 = @adaptor.copy_node( __REGEX277__ )\n\n @adaptor.add_child( root_0, tree_for_REGEX277 )\n\n\n\n when 12\n root_0 = @adaptor.create_flat_list\n\n\n # at line 240:5: ^( ARRAY ( argument )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __ARRAY278__ = match( ARRAY, TOKENS_FOLLOWING_ARRAY_IN_literal_1711 )\n\n tree_for_ARRAY278 = @adaptor.copy_node( __ARRAY278__ )\n\n root_1 = @adaptor.become_root( tree_for_ARRAY278, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 240:14: ( argument )*\n while true # decision 37\n alt_37 = 2\n look_37_0 = @input.peek( 1 )\n\n if ( look_37_0.between?( AMP, AMP_ASGN ) || look_37_0 == POST_DECR || look_37_0.between?( GEQ, AREF ) || look_37_0.between?( GREATER, HAT ) || look_37_0.between?( ARROW, HAT_ASGN ) || look_37_0 == ASGN || look_37_0 == REGEX || look_37_0 == IN || look_37_0 == INCR || look_37_0.between?( INSTANCEOF, RSHIFT3 ) || look_37_0 == RSHIFT3_ASGN || look_37_0.between?( RSHIFT_ASGN, COLON ) || look_37_0 == LEQ || look_37_0.between?( LESS, SLASH ) || look_37_0 == SLASH_ASGN || look_37_0.between?( STAR, DECR ) || look_37_0 == STAR_ASGN || look_37_0 == LSHIFT || look_37_0.between?( DELETE, THIS ) || look_37_0.between?( MINUS, TILDE ) || look_37_0.between?( MINUS_ASGN, MOD ) || look_37_0.between?( MOD_ASGN, TYPEOF ) || look_37_0.between?( NEQ, UMINUS ) || look_37_0.between?( NEQQ, UNDEFINED ) || look_37_0 == NEW || look_37_0 == NOT || look_37_0.between?( NULL, UPLUS ) || look_37_0 == OBJECT || look_37_0.between?( EQ, OR_ASGN ) || look_37_0 == FALSE || look_37_0 == PIPE || look_37_0 == PIPE_ASGN || look_37_0 == PLUS || look_37_0.between?( ID, DOC ) )\n alt_37 = 1\n\n end\n case alt_37\n when 1\n # at line 240:14: argument\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_argument_IN_literal_1713 )\n argument279 = argument\n @state.following.pop\n\n @adaptor.add_child( root_1, argument279.tree )\n\n\n else\n break # out of loop for decision 37\n end\n end # loop for decision 37\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 13\n root_0 = @adaptor.create_flat_list\n\n\n # at line 241:5: ^( OBJECT ( property_definition )* )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n __OBJECT280__ = match( OBJECT, TOKENS_FOLLOWING_OBJECT_IN_literal_1724 )\n\n tree_for_OBJECT280 = @adaptor.copy_node( __OBJECT280__ )\n\n root_1 = 
@adaptor.become_root( tree_for_OBJECT280, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 241:15: ( property_definition )*\n while true # decision 38\n alt_38 = 2\n look_38_0 = @input.peek( 1 )\n\n if ( look_38_0 == GET || look_38_0 == COLON || look_38_0 == SET )\n alt_38 = 1\n\n end\n case alt_38\n when 1\n # at line 241:15: property_definition\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_property_definition_IN_literal_1726 )\n property_definition281 = property_definition\n @state.following.pop\n\n @adaptor.add_child( root_1, property_definition281.tree )\n\n\n else\n break # out of loop for decision 38\n end\n end # loop for decision 38\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 14\n root_0 = @adaptor.create_flat_list\n\n\n # at line 242:5: ^( 'function' ( ID )? parameters statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal282 = match( FUNCTION, TOKENS_FOLLOWING_FUNCTION_IN_literal_1737 )\n\n tree_for_string_literal282 = @adaptor.copy_node( string_literal282 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal282, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 242:19: ( ID )?\n alt_39 = 2\n look_39_0 = @input.peek( 1 )\n\n if ( look_39_0 == ID )\n alt_39 = 1\n end\n case alt_39\n when 1\n # at line 242:19: ID\n _last = @input.look\n __ID283__ = match( ID, TOKENS_FOLLOWING_ID_IN_literal_1739 )\n\n tree_for_ID283 = @adaptor.copy_node( __ID283__ )\n\n @adaptor.add_child( root_1, tree_for_ID283 )\n\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1742 )\n parameters284 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters284.tree )\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1744 )\n statement_block285 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block285.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n when 15\n root_0 = @adaptor.create_flat_list\n\n\n # at line 243:5: ^( '->' ( parameters )? 
statement_block )\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal286 = match( ARROW, TOKENS_FOLLOWING_ARROW_IN_literal_1754 )\n\n tree_for_string_literal286 = @adaptor.copy_node( string_literal286 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal286, root_1 )\n\n\n\n match( DOWN, nil )\n # at line 243:13: ( parameters )?\n alt_40 = 2\n look_40_0 = @input.peek( 1 )\n\n if ( look_40_0 == PARAMS )\n alt_40 = 1\n end\n case alt_40\n when 1\n # at line 243:13: parameters\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_parameters_IN_literal_1756 )\n parameters287 = parameters\n @state.following.pop\n\n @adaptor.add_child( root_1, parameters287.tree )\n\n\n end\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_block_IN_literal_1759 )\n statement_block288 = statement_block\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_block288.tree )\n\n match( UP, nil )\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n end\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 33 )\n\n end\n \n return return_value\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def literal(buffer)\n reader = lambda { |string = ''|\n buffer.major_mode.read(1) do |event|\n if unicode = event.unicode\n string += unicode # copy\n buffer.message string.inspect\n\n case result = literal_handle(buffer, string)\n when nil\n reader.call(string)\n when String\n literal_insert(buffer, result)\n end\n else\n return # Unverrichteter Dinge\n end\n end\n }\n\n reader.call\n end", "def next_token\n\n if @ss.bol?\n @line+=1\n @[email protected]\n end\n\n position=[@line,@ss.pos-@old_pos+1]\n\n return :eos if @ss.eos?\n\n case\n when text = @ss.scan(NEWLINE)\n next_token()\n when text = @ss.scan(SPACE)\n next_token()\n when text = @ss.scan(COMMENT)\n next_token()\n when text = @ss.scan(ARROW)\n return Token.new [:arrow,text,position]\n when text = @ss.scan(LT)\n return Token.new [:lt,text,position]\n when text = @ss.scan(LBRACK)\n return Token.new [:lbrack,text,position]\n when text = @ss.scan(RBRACK)\n return Token.new [:rbrack,text,position]\n when text = @ss.scan(IDENTIFIER)\n case\n when value = text.match(IDENT)\n return Token.new [:IDENT,text,position]\n when value = text.match(FLOAT)\n return Token.new [:FLOAT,text,position]\n when value = text.match(INT)\n return Token.new [:INT,text,position]\n when value = text.match(STRING)\n return Token.new [:STRING,text,position]\n when value = text.match(MODULE)\n return Token.new [:module,text,position]\n when value = text.match(CLASS)\n return Token.new [:class,text,position]\n when value = text.match(END_)\n return Token.new [:end,text,position]\n when value = text.match(ATTR)\n return Token.new [:attr,text,position]\n when value = text.match(LPAREN)\n return Token.new [:lparen,text,position]\n when value = text.match(RPAREN)\n return Token.new [:rparen,text,position]\n else\n return Token.new [:identifier,text,position]\n end\n else\n x = @ss.getch\n 
return Token.new [x, x,position]\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.peek(1) == \"\\n\"\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil then\n case\n when ss.skip(/[ \\t]+/) then\n # do nothing\n when ss.skip(/\\/\\/[^\\r\\n]*/) then\n # do nothing\n when text = ss.scan(/\\r|\\n/) then\n newline text\n when text = ss.scan(/[!=<>]=?/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/[(){},;.\\-+\\/*]/) then\n action { [:SPECIAL, text] }\n when text = ss.scan(/#{DIGIT}+(\\.#{DIGIT}+)?/) then\n action { [:NUMBER, text] }\n when text = ss.scan(/nil/) then\n action { [:NIL, text] }\n when text = ss.scan(/false/) then\n action { [:FALSE, text] }\n when text = ss.scan(/true/) then\n action { [:TRUE, text] }\n when text = ss.scan(/#{ALPHA}(#{ALPHA}|#{DIGIT})*/) then\n action { [:IDENTIFIER, text] }\n when ss.skip(/\"\"/) then\n action { [:STRING, '\"\"'] }\n when ss.skip(/\"/) then\n [:state, :IN_STRING]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :IN_STRING then\n case\n when text = ss.scan(/[^\"]+/) then\n action { [:STRING, \"\\\"#{text}\\\"\"] }\n when ss.skip(/\"/) then\n [:state, nil]\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def next\n ret = peek_next\n @str.slice! @last_re if ret.type != :eos\n\n ret\n end", "def next()\n if @ss.scan_until(token_re)\n term = @ss.matched\n term_end = @ss.pos\n term_start = term_end - term.size\n else\n return nil\n end\n\n return Token.new(normalize(term), term_start, term_end)\n end", "def next_token; end", "def peek_next\n fail 'No string specified' unless @str\n\n return Token.new(:eos) if skip_space == :eos\n\n PATTERNS.each do |re, func|\n re.match(@str) do |mat|\n @last_re = re # This is what will be removed\n mat = mat.to_s\n return func.is_a?(Symbol) ? send(func, mat) : instance_exec(mat, &func)\n end\n end\n end", "def next_token\n @sy = @tokenizer.next_token\n \n # ignore EOL tokens since no productions would accept them\n while @sy.type == TokenType::EOL_TOKEN\n @sy = @tokenizer.next_token\n end\n end", "def literal_token\n if match = @chunk.match(OPERATOR)\n value, _ = *match\n else\n value = @chunk[0]\n end\n tag = value\n\n if COMPOUND_ASSIGN.include?(value)\n tag = :COP\n else\n case value\n when '(', '{', '[' then @ends.push(INVERSES[value])\n when ')', '}', ']'\n prev = @tokens[-1]\n pair(value)\n tokens.delete_at(-1) if prev && prev[0] == :TERM\n end\n end\n token(tag, value)\n value.size\n end", "def process_lit(exp)\n # TODO what about floats and big numbers?\n\n value = exp.shift\n c_type = exp.c_type\n case c_type\n when CType.long, CType.float then\n return value.to_s\n when CType.symbol then\n return value.to_s.inspect # HACK wrong! write test!\n else\n raise \"Bug! 
no: Unknown literal #{value}:#{value.class}\"\n end\n end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def lex_en_expr_beg; end", "def literal; end", "def octal_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 70 )\n\n\n\n type = OctalLiteral\n channel = ANTLR3::DEFAULT_CHANNEL\n # - - - - label initialization - - - -\n\n\n # - - - - main rule block - - - -\n # at line 527:16: '0' ( '0' .. '7' )+\n match( 0x30 )\n # at file 527:20: ( '0' .. '7' )+\n match_count_22 = 0\n while true\n alt_22 = 2\n look_22_0 = @input.peek( 1 )\n\n if ( look_22_0.between?( 0x30, 0x37 ) )\n alt_22 = 1\n\n end\n case alt_22\n when 1\n # at line \n if @input.peek( 1 ).between?( 0x30, 0x37 )\n @input.consume\n else\n mse = MismatchedSet( nil )\n recover mse\n raise mse\n\n end\n\n\n\n else\n match_count_22 > 0 and break\n eee = EarlyExit(22)\n\n\n raise eee\n end\n match_count_22 += 1\n end\n\n\n\n\n @state.type = type\n @state.channel = channel\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 70 )\n\n\n end", "def next_token\n #dputs \"@line: \" + @line\n if @state == :normal\n while true\n temp = _next_token\n unless temp == \"#white_space\" || temp == \"#comment\"\n break\n end\n end\n #dputs \"token: \" + temp\n @current_token = temp\n return temp\n else\n return :Terminate\n end\n \n end", "def next_item\n return @last_lexeme if @last_lexeme[0].nil?\n while true\n @line = next_line if buffer_empty?\n if @line.nil?\n lexeme = [nil, @line_no, 1]\n break\n end\n\n # Skip whitespaces\n while space?(@line[@pos])\n @pos += 1\n end\n\n # Skip triple dot characters (join lines)\n if @line[@pos, 4] == \"...\\n\" || @line[@pos, 2] == \"…\\n\"\n line_no, pos = @line_no, @pos + 1\n @line, @pos = next_line, 0\n if @line.nil? || @line.strip.empty?\n raise SyntaxError.new(line_no, pos, 'Line continuation may not be followed by an empty line')\n end\n next\n end\n\n # Skip one line comments\n if @line[@pos, 3] == 'BTW'\n @pos = @line.length - 1\n end\n # and multiline ones\n if @last_lexeme[0] == \"\\n\" && @line[@pos, 4] == 'OBTW'\n tldr_found, line_no, pos = false, @line_no, @pos + 1\n while true\n @line = next_line\n break if @line.nil?\n m = @line.chomp.match(/(^|\\s+)TLDR\\s*(,|$)/)\n unless m.nil?\n tldr_found = true\n @pos = m.end(0)\n break\n end\n end\n unless tldr_found\n raise SyntaxError.new(line_no, pos, 'Unterminated multiline comment')\n end\n next\n end\n\n if @line[@pos] == \"\\n\" || @line[@pos] == '!'\n # Handle newline and bang separately\n lexeme = [@line[@pos], @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == ','\n # Comma is a virtual newline\n lexeme = [\"\\n\", @line_no, @pos + 1]\n @pos += 1\n elsif @line[@pos] == '\"'\n # Strings begin with \"\n # Need to handle empty strings separately\n if @line[@pos + 1] == '\"'\n string = '\"\"'\n else\n m = @line.match(/([^:](?:::)*)\"/, @pos + 1)\n string = @line[@pos..m.end(0) - 1] unless m.nil?\n end\n # String must be followed by an allowed lexeme delimiter\n if string.nil? 
|| !lexeme_delimiter?(@pos + string.length)\n raise SyntaxError.new(@line_no, @pos + 1, 'Unterminated string constant')\n end\n lexeme = [%Q[\"#{escape_string(string[1..-2])}\"], @line_no, @pos + 1]\n @pos = @pos + string.length\n else\n # Grab as much characters as we can until meeting lexeme delimiter\n # Treat what we grabbed as a lexeme\n seq, pos = '', @pos + 1\n until lexeme_delimiter?(@pos)\n seq += @line[@pos]\n @pos += 1\n end\n lexeme = [seq, @line_no, pos]\n end\n\n break\n end\n @last_lexeme = lexeme\n end", "def next_token\n return @extra_tokens.pop unless @extra_tokens.empty?\n\n skip_whitespace\n c = @sql[@pos, 1]\n return next_string(c) if quote?(c)\n\n first_is_identifier_char = identifier_char?(c)\n t = c\n @pos += 1\n while @pos < @length\n c = @sql[@pos, 1]\n break if c == ' '\n\n this_is_identifier_char = identifier_char?(c)\n break if first_is_identifier_char != this_is_identifier_char && @length > 0\n break if !this_is_identifier_char && quote?(c)\n\n t << c\n @pos += 1\n end\n\n case t\n when ''\n nil\n when /^\\d+$/\n t.to_i\n else\n t\n end\n end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lex_en_line_begin; end", "def lit(sexp, level)\n val = sexp.shift\n case val\n when Numeric\n val.inspect\n when Symbol\n @symbols[val.to_s] ||= \"$symbol_#{@sym_id += 1}\"\n when Regexp\n val == // ? /^/.inspect : val.inspect\n when Range\n \"$range(#{val.begin}, #{val.end}, #{val.exclude_end?})\"\n else\n raise \"Bad lit: #{val.inspect}\"\n end\n end", "def string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 42)\n\n type = STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 486:4: '\\\\'' LITERAL_CHAR ( LITERAL_CHAR )* '\\\\''\n match(?\\')\n literal_char!\n # at line 486:22: ( LITERAL_CHAR )*\n loop do #loop 5\n alt_5 = 2\n look_5_0 = @input.peek(1)\n\n if (look_5_0.between?(0x0000, ?&) || look_5_0.between?(?(, 0xFFFF)) \n alt_5 = 1\n\n end\n case alt_5\n when 1\n # at line 486:22: LITERAL_CHAR\n literal_char!\n\n else\n break #loop 5\n end\n end\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 42)\n\n end", "def next_input_element(hint)\n if ret = @lit_cache[@pos]\n @pos = @lit_nextpos[@pos]\n @head_pos = @pos\n return ret\n end\n pos0 = @pos\n #\n # skip white space here, because ECMA262(5.1.2) says:\n #\n # Simple white space and single-line comments are discarded and\n # do not appear in the stream of input elements for the\n # syntactic grammar.\n #\n while white_space or single_line_comment\n end\n\n ret = line_terminator || multi_line_comment || token\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n @head_pos = @pos\n return ret\n end\n\n if @codes[@pos].nil?\n return nil\n end\n if hint.nil?\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n elsif hint == :div\n ret = div_punctuator\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n elsif hint == :regexp\n ret = regexp_literal\n if ret\n @lit_cache[pos0] = ret\n @lit_nextpos[pos0] = @pos\n end\n @head_pos = @pos\n return ret\n else\n if @codes[@pos] == 0x2f\n ECMA262::LIT_DIV_OR_REGEXP_LITERAL\n else\n nil\n end\n end\n end", "def next_token\n token = @enum[@pointer]\n raise NonstringTokenError unless token.nil? 
|| token.kind_of?(String) \n @pointer += 1\n token\n end", "def next\n\t\tif @next_token\n\t\t\ttoken = @next_token\n\t\t\t@next_token = nil\n\t\t\treturn token\n\t\telse\n\t\t\ttoken = read_token\n\t\t\treturn token\n\t\tend\n\tend", "def next\n token = next_token\n token = next_token while token&.empty?\n token\n end", "def next_token\n tokens.shift\n end", "def push_literal\n <<-CODE\n next_int;\n t1 = cpu_current_literals(state, c);\n t2 = fast_fetch(t1, _int);\n stack_push(t2);\n CODE\n end", "def consume!\n empty_line ||\n name_token ||\n comment_token ||\n whitespace_token ||\n line_token ||\n heredoc_token ||\n string_token ||\n number_token ||\n regex_token ||\n literal_token\n end", "def next_line\r\n while true\r\n if (@lexemes[@pointer] != nil && @lexemes[@pointer] != '@')\r\n @pointer = @pointer.next\r\n else\r\n break\r\n end\r\n end\r\n end", "def process_lit(exp)\n # TODO: audit against obfuscator\n value = exp.shift\n case value\n when Integer then\n return \"LONG2NUM(#{value})\"\n when Float then\n return \"rb_float_new(#{value})\"\n when Symbol\n return \"ID2SYM(rb_intern(#{value.to_s.inspect}))\"\n when Range\n f = process_lit [ value.first ]\n l = process_lit [ value.last ]\n x = 0\n x = 1 if value.exclude_end?\n\n return \"rb_range_new(#{f}, #{l}, #{x})\"\n when Regexp\n src = value.source\n return \"rb_reg_new(#{src.inspect}, #{src.size}, #{value.options})\"\n else\n raise \"Bug! no: Unknown literal #{value}:#{value.class}\"\n end\n return nil\n end", "def peek_token\n return nil if @start >= @expr.length\n if @start == 0 && @finish == 0\n return @expr[0]\n else\n token = @expr[@start...@finish]\n\n if token.empty?\n @finish = @finish + 1\n peek_token\n else\n return token\n end\n end\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n token =\n case state\n when nil then\n case\n when ss.skip(/\\s+/) then\n # do nothing\n when ss.skip(/:(#{SYMBOL_NAME})/o) then\n action { emit :tSYMBOL, &:to_sym }\n when ss.skip(/\"(.+?)\"/) then\n action { emit :tSTRING }\n when ss.skip(/[-+]?\\d+\\.\\d+/) then\n action { emit :tNUMBER, &:to_f }\n when ss.skip(/[-+]?\\d+/) then\n action { emit :tNUMBER, &:to_i }\n when ss.skip(/#{Regexp.union(\n %w\"( ) { | } [ ] < > $ ! ^ ` ... + * ? ,\"\n )}/o) then\n action { emit ss.matched, &:to_sym }\n when ss.skip(/#{REGEXP}/o) then\n action { emit_regexp }\n when ss.skip(/%?(#{CONST_NAME})/o) then\n action { emit :tPARAM_CONST }\n when ss.skip(/%([a-z_]+)/) then\n action { emit :tPARAM_NAMED }\n when ss.skip(/%(\\d*)/) then\n action { emit(:tPARAM_NUMBER) { |s| s.empty? ? 1 : s.to_i } } # Map `%` to `%1`\n when ss.skip(/_(#{IDENTIFIER})/o) then\n action { emit :tUNIFY }\n when ss.skip(/_/o) then\n action { emit :tWILDCARD }\n when ss.skip(/\\#(#{CALL})/o) then\n action { @state = :ARG; emit :tFUNCTION_CALL, &:to_sym }\n when ss.skip(/#{IDENTIFIER}\\?/o) then\n action { @state = :ARG; emit :tPREDICATE, &:to_sym }\n when ss.skip(/#{NODE_TYPE}/o) then\n action { emit :tNODE_TYPE, &:to_sym }\n when ss.skip(/\\#.*/) then\n action { emit_comment }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :ARG then\n case\n when ss.skip(/\\(/) then\n action { @state = nil; emit :tARG_LIST }\n when ss.skip(//) then\n action { @state = nil }\n else\n text = ss.string[ss.pos .. 
-1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def push_literal(*args)\n new_literal = Literal.new(self, *args)\n @literal_stack.push(new_literal)\n\n if new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited_words\n else\n self.class.lex_en_plain_backslash_delimited_words\n end\n elsif new_literal.words? && !new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_words\n else\n self.class.lex_en_plain_words\n end\n elsif !new_literal.words? && new_literal.backslash_delimited?\n if new_literal.interpolate?\n self.class.lex_en_interp_backslash_delimited\n else\n self.class.lex_en_plain_backslash_delimited\n end\n else\n if new_literal.interpolate?\n self.class.lex_en_interp_string\n else\n self.class.lex_en_plain_string\n end\n end\n end", "def next()\n return \" \" unless has_next()\n if(@count <= 0)\n @char = @compressed_string[@i]\n @i += 1\n @count = get_count()\n end\n @count -= 1\n return @char\n end", "def next_token\n \n # Early return if there is nothing to be read. This means we've reached the end of the file.\n \n unless @file[@pos]\n return nil\n end\n \n # This is the token that will be returned.\n token = Compiler::Token.new\n \n # Initializes a new instance of the automaton.\n automaton = Automaton.new\n \n # Will be set inside the loop, if necessary.\n increment_next = false\n \n # Will be set inside the loop. Marks whether we've reached the end of the file.\n eof = false\n \n # Build a new token while we don't have a new word yet and isn't in the failed state\n while ((automaton.state != :A || automaton.word.empty?) 
&& automaton.state != :failed)\n \n # The next input for the automaton\n char = @file[@pos]\n \n if char\n \n # Moves the pointer to the next char\n @pos += 1\n \n automaton.transition(char)\n \n # While the automaton hasn't started to build a new word yet, increments the line and column numbers.\n # In this phase, we're just skipping blank characters\n if automaton.word.empty?\n if increment_next\n if char == \"\\n\"\n increment_next = true\n else\n increment_next = false\n end\n @line += 1\n @column = 0\n elsif char == \"\\n\"\n @column += 1\n increment_next = true\n else\n @column += 1\n end\n end\n \n else\n eof = true\n puts \"breaking\"\n break\n end\n end\n \n \n \n if eof\n automaton.transition(\"\\n\")\n else\n @pos -= 1\n end\n \n if (automaton.type == :identifier) && (Compiler.reserved_words.is_reserved?(automaton.word))\n token.type = :reserved_word\n else\n token.type = automaton.type\n end\n \n token.value = automaton.word\n token.line = @line\n token.column = @column\n \n return token\n \n end", "def next!() end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def lex_en_line_begin=(_arg0); end", "def racc_read_token(t, tok, val); end", "def next_token\n @tokens.shift\n end", "def find_literal(what)\n idx = @literals.index(what)\n return idx if idx\n add_literal(what)\n end", "def next_token; @stack.shift; end", "def char_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 41)\n\n type = CHAR_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 482:4: '\\\\'' LITERAL_CHAR '\\\\''\n match(?\\')\n literal_char!\n match(?\\')\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 41)\n\n end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def lex_start; end", "def getNextToken\n \n #Check if the end has been reached\n if @currentChar == nil\n return\n end\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n \n if @currentChar == '%'\n comment\n if @currentChar.match(/\\s/) != nil\n skipWhitespaces\n end\n end \n \n if @currentChar.match(/[A-Za-z0-9_]/) != nil\n return Token.new(NAME, name)\n end\n \n if @currentChar == \"\\\"\"\n return Token.new(STRING, string)\n end\n \n if @currentChar == '{'\n advance\n return Token.new(OPENING_BRACE,'{')\n end\n \n if @currentChar == '}'\n advance\n return Token.new(CLOSING_BRACE,'}')\n end\n \n if @currentChar == '['\n advance\n return Token.new(OPENING_BRACKET,'[')\n end\n \n if @currentChar == ']'\n advance\n return Token.new(CLOSING_BRACKET,']')\n end\n \n if @currentChar == ':'\n advance\n return Token.new(COLON,':')\n end\n \n if @currentChar == '*'\n advance\n return Token.new(ASTERIX,'*')\n end\n \n if @currentChar == '='\n advance\n return Token.new(EQUALS,'=')\n end\n \n if @currentChar == ';'\n advance\n return Token.new(SEMICOLON,';')\n end\n \n if @currentChar == '^'\n advance\n return Token.new(CIRCUMFLEX,'^')\n end\n \n if @currentChar == '+'\n advance\n return Token.new(PLUS,'+')\n end\n if @currentChar == '('\n advance\n return Token.new(OPENING_PARANTHESIS,'(')\n end\n if @currentChar == ')'\n advance\n return Token.new(CLOSING_PARANTHESIS,')')\n end\n if @currentChar == '.'\n advance\n return Token.new(DOT,'.')\n end\n if @currentChar == '#'\n advance\n return Token.new(HASH,'#')\n end\n if @currentChar == ','\n advance\n return Token.new(COMMA,',')\n end\n error\n \n return 
Token.new(EOF,'EOF') \n \n end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def _lex_eof_trans; end", "def next\n displacement = @file.gets.try(:chomp).try(:to_f)\n return nil unless displacement\n\n ret = @curr_val\n @curr_val += displacement\n ret\n end", "def set_literal\n <<-CODE\n next_int;\n tuple_put(state, cpu_current_literals(state, c), _int, stack_top());\n CODE\n end", "def match(ptr, depth = 0)\n case c = ptr.peek(1)\n when '\"', '`'\n start_pos = ptr.pos\n ptr.pos += 1\n AST.new(:string, value: ptr.scan_until(/#{c}/).chop,\n attributes: { type: char_to_type(c) },\n pos: start_pos)\n end\n end", "def run(source, until_token = :invalid, token_count = nil)\n @at_end = false\n @source = source\n @reader = source.each_char\n\n read_next()\n\n while token_count == nil || token_count > 0\n skip_whitespace()\n current = @marker.character\n break unless current\n\n token = Token.new\n token.kind = :invalid\n token.from = @marker.source_index\n token.position = @marker.position.dup\n\n case current\n when ?\", ?'\n read_string(token)\n\n when ?0\n case peek_next()\n when ?x, ?X, ?b, ?B then read_base_number(token)\n else read_number(token)\n end\n\n when ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9\n read_number(token)\n\n # dot, double dot, triple dot, and floats beginning with a dot\n when ?.\n token.kind = :dot\n case peek_next()\n when ?0, ?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9 then read_number(token)\n when ?.\n read_next()\n token.kind = :double_dot\n\n if peek_next() == ?.\n read_next()\n token.kind = :triple_dot\n end\n\n token.value = Token::DESCRIPTORS[token.kind]\n else\n token.value = Token::DESCRIPTORS[token.kind]\n end\n\n when ?_, ?a, ?b, ?c, ?d, ?e, ?f, ?g, ?h, ?i, ?j, ?k, ?l, ?m, ?n, ?o, ?p,\n ?q, ?r, ?s, ?t, ?u, ?v, ?w, ?x, ?y, ?z, ?A, ?B, ?C, ?D, ?E, ?F, ?G, ?H,\n ?I, ?J, ?K, ?L, ?M, ?N, ?O, ?P, ?Q, ?R, ?S, ?T, ?U, ?V, ?W, ?X, ?Y, ?Z\n read_word(token)\n\n when ?\\n\n token.value = current\n token.kind = :newline\n\n when ??, ?#, ?@, ?$, ?%, ?(, ?), ?[, ?], ?{, ?}, ?^, ?~, ?`, ?\\\\, ?,, ?;\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?=, ?|, ?&, ?:, ?+, ?*\n current << read_next() if peek_next() == current\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?!\n current << read_next() if peek_next() == ?=\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?>, ?<\n case peek_next()\n when ?=, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?-\n case peek_next()\n when ?>, current then current << read_next()\n end\n token.value = current\n token.kind = PUNCTUATION_STRINGS[current]\n\n when ?/\n case peek_next()\n when ?/ then read_line_comment(token)\n when ?* then read_block_comment(token)\n else\n token.value = Token::DESCRIPTORS[token.kind = :slash]\n read_next()\n end\n\n end # case current\n\n token.to = @marker.source_index\n last_kind = token.kind\n if !(@skip_comments && token.comment?) && !(@skip_newlines && token.newline?)\n if last_kind != :invalid\n @tokens << token\n yield token if block_given?\n else\n raise RuntimeError, \"#{token.position} Invalid token: #{token.inspect}\"\n end\n end\n\n break if until_token == last_kind\n\n read_next()\n token_count -= 1 unless token_count.nil?\n end # while current && token_count > 0\n\n @source = nil\n @reader = nil\n\n self\n end", "def next_token\n\n token = nil\n\n until ss.eos? 
or token do\n token =\n case state\n when nil then\n case\n when text = ss.scan(/#{DIGIT}/) then\n action { [:DIGIT, text.to_i] }\n when text = ss.scan(/#{ADDITION}/) then\n action { [:ADDITION, text] }\n when text = ss.scan(/#{SUBSTRACTION}/) then\n action { [:SUBSTRACTION, text] }\n when text = ss.scan(/#{MULTIPLICATION}/) then\n action { [:MULTIPLICATION, text] }\n when text = ss.scan(/#{DIVISION}/) then\n action { [:DIVISION, text] }\n when text = ss.scan(/#{OPENING_PARANTHESIS}/) then\n action { [:OPENING_PARANTHESIS, text] }\n when text = ss.scan(/#{CLOSING_PARANTHESIS}/) then\n action { [:CLOSING_PARANTHESIS, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def get_token\n @tokenbuf << read_token if @tokenbuf.length == 0\n return @tokenbuf.shift\n end", "def next_token\n\n token = nil\n\n until ss.eos? or token do\n if ss.check(/\\n/) then\n self.lineno += 1\n # line starts 1 position after the newline\n self.start_of_current_line_pos = ss.pos + 1\n end\n self.old_pos = ss.pos\n token =\n case state\n when nil, :option, :inner, :start, :macro, :rule, :group then\n case\n when ss.skip(/options?.*/) then\n [:state, :option]\n when ss.skip(/inner.*/) then\n [:state, :inner]\n when ss.skip(/macros?.*/) then\n [:state, :macro]\n when ss.skip(/rules?.*/) then\n [:state, :rule]\n when ss.skip(/start.*/) then\n [:state, :start]\n when ss.skip(/end/) then\n [:state, :END]\n when ss.skip(/\\A((?:.|\\n)*)class ([\\w:]+.*)/) then\n action { [:class, *matches] }\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/\\s*(\\#.*)/) then\n action { [:comment, text] }\n when (state == :option) && (ss.skip(/\\s+/)) then\n # do nothing\n when (state == :option) && (text = ss.scan(/stub/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/debug/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/do_parse/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/lineno/i)) then\n action { [:option, text] }\n when (state == :option) && (text = ss.scan(/column/i)) then\n action { [:option, text] }\n when (state == :inner) && (text = ss.scan(/.*/)) then\n action { [:inner, text] }\n when (state == :start) && (text = ss.scan(/.*/)) then\n action { [:start, text] }\n when (state == :macro) && (ss.skip(/\\s+(\\w+)\\s+#{RE}/o)) then\n action { [:macro, *matches] }\n when (state == :rule) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:rule, *matches] }\n when (state == :rule) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*:[\\ \\t]*#{RE}/o)) then\n action { [:grouphead, *matches] }\n when (state == :group) && (ss.skip(/\\s*\\|\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:group, *matches] }\n when (state == :group) && (ss.skip(/\\s*#{ST}?[\\ \\t]*#{RE}[\\ \\t]*#{ACT}?/o)) then\n action { [:groupend, *matches] }\n else\n text = ss.string[ss.pos .. 
-1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n when :END then\n case\n when ss.skip(/\\n+/) then\n # do nothing\n when text = ss.scan(/.*/) then\n action { [:end, text] }\n else\n text = ss.string[ss.pos .. -1]\n raise ScanError, \"can not match (#{state.inspect}) at #{location}: '#{text}'\"\n end\n else\n raise ScanError, \"undefined state at #{location}: '#{state}'\"\n end # token = case state\n\n next unless token # allow functions to trigger redo w/ nil\n end # while\n\n raise LexerError, \"bad lexical result at #{location}: #{token.inspect}\" unless\n token.nil? || (Array === token && token.size >= 2)\n\n # auto-switch state\n self.state = token.last if token && token.first == :state\n\n token\n end", "def peek\n @tokens[@position]\n end", "def read_character\n lit = read_literal\n\n return \" \" if lit.empty? && peek_char == \" \"\n CHARACTERS.fetch(lit.downcase) do\n # Return just the first character\n unread(lit[1..-1])\n lit[0,1]\n end\n end", "def next\n @tok ||= read_token\n @tok, tok = nil, @tok\n @prev = tok\n return tok\n end", "def get_next\n return if eof?\n\n @buffer << @io.gets if @buffer.empty?\n\n until @io.eof?\n line = @io.gets\n next unless line\n\n if @parser.start_new?(line) || @buffer.empty?\n @buffer << line\n break\n else\n @buffer.last << line\n end\n end\n\n return if @buffer.empty?\n @parser.parse(@buffer.slice!(0)) || self.get_next\n end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def lex_en_expr_mid; end", "def _reduce_279(val, _values, result)\n result = lexer.line\n \n result\nend", "def get\n @source_index += 1\n\n # Maintain line count.\n prev_char = @source_text[@source_index - 1]\n if @source_index.positive? && prev_char == \"\\n\"\n @line_index += 1\n @col_index = -1\n end\n\n @col_index += 1\n char = if @source_index > @last_index\n # Read past the end of source text.\n END_MARK\n else\n @source_text[@source_index]\n end\n Character.new(char, @line_index, @col_index, @source_index, @source_text)\n end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def lex_en_expr_beg=(_arg0); end", "def la( count = 1 )\n until @lookahead.length >= count\n if token = @lexer.next_token( @lexer_state.number ) then\n @lookahead << token\n else\n nyi \"error handling for lexer error\" if @lexer.input_remaining?\n break\n end\n end\n \n return @lookahead[count - 1]\n end", "def string_literal\n # StringLiteral ::\n # \" DoubleStringCharactersopt \"\n # ' SingleStringCharactersopt '\n #\n # DoubleStringCharacters ::\n # DoubleStringCharacter DoubleStringCharactersopt\n #\n # SingleStringCharacters ::\n # SingleStringCharacter SingleStringCharactersopt\n #\n # DoubleStringCharacter ::\n # SourceCharacter but not one of \" or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n # SingleStringCharacter ::\n # SourceCharacter but not one of ' or \\ or LineTerminator\n # \\ EscapeSequence\n # LineContinuation\n #\n if (code = @codes[@pos]) == 0x27 #'\n term = 0x27\n elsif code == 0x22 #\"\n term = 0x22\n else\n return nil\n end\n @pos += 1\n pos0 = @pos\n\n str = []\n while (code = @codes[@pos])\n if code.nil?\n raise ParseError.new(\"no `#{term}' at end of string\", self)\n elsif line_terminator?(code)\n raise ParseError.new(\"string has line terminator in body\", self)\n elsif code == 0x5c #\\\n @pos += 1\n str.push(escape_sequence)\n elsif code == term\n @pos += 1\n return ECMA262::ECMA262String.new(str.compact.pack(\"U*\"))\n else\n @pos += 1\n str.push(code)\n end\n 
end\n nil\n end", "def next_token\n\t\[email protected]_token\n\tend", "def next()\n @index += 1\n @string[@index...(@index+1)]\n end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def lex_en_expr_end; end", "def double_angle_string_literal!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in(__method__, 45)\n\n type = DOUBLE_ANGLE_STRING_LITERAL\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 500:4: '<<' ( . )* '>>'\n match(\"<<\")\n # at line 500:9: ( . )*\n loop do #loop 8\n alt_8 = 2\n look_8_0 = @input.peek(1)\n\n if (look_8_0 == ?>) \n look_8_1 = @input.peek(2)\n\n if (look_8_1 == ?>) \n alt_8 = 2\n elsif (look_8_1.between?(0x0000, ?=) || look_8_1.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n elsif (look_8_0.between?(0x0000, ?=) || look_8_0.between?(??, 0xFFFF)) \n alt_8 = 1\n\n end\n case alt_8\n when 1\n # at line 500:9: .\n match_any\n\n else\n break #loop 8\n end\n end\n match(\">>\")\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out(__method__, 45)\n\n end", "def next() end", "def next() end", "def extract_first_expression(lines, consume = T.unsafe(nil), &block); end", "def next_word\n return unless md = get.match(FORWARD_WORD, cursor)\n self.cursor = md.offset(0).last\n end", "def next_token\n return [false, false] if @src.eos?\n# p @src.rest if @yydebug\n if ret = @src.scan(EM_OPEN_RE)\n @pre << ret\n [:EM_OPEN, ret]\n elsif ret = @src.scan(EM_CLOSE_RE)\n @pre << ret\n [:EM_CLOSE, ret]\n elsif ret = @src.scan(CODE_OPEN_RE)\n @pre << ret\n [:CODE_OPEN, ret]\n elsif ret = @src.scan(CODE_CLOSE_RE)\n @pre << ret\n [:CODE_CLOSE, ret]\n elsif ret = @src.scan(VAR_OPEN_RE)\n @pre << ret\n [:VAR_OPEN, ret]\n elsif ret = @src.scan(VAR_CLOSE_RE)\n @pre << ret\n [:VAR_CLOSE, ret]\n elsif ret = @src.scan(KBD_OPEN_RE)\n @pre << ret\n [:KBD_OPEN, ret]\n elsif ret = @src.scan(KBD_CLOSE_RE)\n @pre << ret\n [:KBD_CLOSE, ret]\n elsif ret = @src.scan(INDEX_OPEN_RE)\n @pre << ret\n [:INDEX_OPEN, ret]\n elsif ret = @src.scan(INDEX_CLOSE_RE)\n @pre << ret\n [:INDEX_CLOSE, ret]\n elsif ret = @src.scan(REF_OPEN_RE)\n @pre << ret\n [:REF_OPEN, ret]\n elsif ret = @src.scan(REF_CLOSE_RE)\n @pre << ret\n [:REF_CLOSE, ret]\n elsif ret = @src.scan(FOOTNOTE_OPEN_RE)\n @pre << ret\n [:FOOTNOTE_OPEN, ret]\n elsif ret = @src.scan(FOOTNOTE_CLOSE_RE)\n @pre << ret\n [:FOOTNOTE_CLOSE, ret]\n elsif ret = @src.scan(VERB_OPEN_RE)\n @pre << ret\n [:VERB_OPEN, ret]\n elsif ret = @src.scan(VERB_CLOSE_RE)\n @pre << ret\n [:VERB_CLOSE, ret]\n elsif ret = @src.scan(BAR_RE)\n @pre << ret\n [:BAR, ret]\n elsif ret = @src.scan(QUOTE_RE)\n @pre << ret\n [:QUOTE, ret]\n elsif ret = @src.scan(SLASH_RE)\n @pre << ret\n [:SLASH, ret]\n elsif ret = @src.scan(BACK_SLASH_RE)\n @pre << ret\n [:BACK_SLASH, ret]\n elsif ret = @src.scan(URL_RE)\n @pre << ret\n [:URL, ret]\n elsif ret = @src.scan(OTHER_RE)\n @pre << ret\n [:OTHER, ret]\n else\n ret = @src.rest\n @pre << ret\n @src.terminate\n [:OTHER, ret]\n end\nend", "def get_procedure_literal\r\n save, @buffer = @buffer, \"\"\r\n open_procedure_literal\r\n\r\n begin\r\n token = get_procedure_token\r\n due_token(token)\r\n end until token.has_tag?(:end)\r\n\r\n close_procedure_literal\r\n (_, @buffer = @buffer, save)[0]\r\n end", "def next_item\n lexeme, token = @lexer.next, nil\n if lexeme[0].nil?\n token = { type: :eof }\n elsif lexeme[0].lol_string?\n token = { type: :string, data: lexeme[0][1..-2] }\n elsif 
lexeme[0].lol_integer?\n token = { type: :integer, data: lexeme[0].to_i }\n elsif lexeme[0].lol_float?\n token = { type: :float, data: lexeme[0].to_f }\n elsif lexeme[0].lol_boolean?\n token = { type: :boolean, data: (lexeme[0] == 'WIN') }\n elsif lexeme[0] == '!'\n token = { type: :exclamation }\n elsif lexeme[0] == \"\\n\"\n token = { type: :newline }\n else\n # Try to match keyword\n token_type = match_longest(lexeme[0], @token_table)\n unless token_type.nil?\n token = { type: token_type }\n # Consume all peeked lexemes\n token_type.to_s.count('_').times { @lexer.next }\n else\n # Try to match identifier\n if lexeme[0].lol_identifier?\n token = { type: :identifier, data: lexeme[0] }\n end\n end\n end\n raise UnknownTokenError.new(lexeme) if token.nil?\n token.merge(line: lexeme[1], pos: lexeme[2])\n end", "def peek\n @tokens[@pos]\n end", "def next()\n if has_next()\n @strings[0][1]-=1\n c = @strings[0][0]\n while has_next() and @strings[0][1] == 0\n @strings.shift\n end\n return c\n end\n return \" \"\n end", "def process_lit(exp)\n exp.shift\n value = exp.shift\n\n if value.is_a?(Numeric) && [email protected]?(value)\n @file.magic_numbers << MagicNumber.new(:value => value, :line => exp.line)\n end\n\n s()\n end", "def peek_lit(hint)\n pos0 = @pos\n while lit = next_input_element(hint) and (lit.ws? or lit.lt?)\n end\n @pos = pos0\n lit\n end", "def literals_list\n @literals ||= \"\"\n end", "def next_token\n return if @scanner.eos?\n\n if @scanner.scan(SKIP_PATTERN)\n @column += @scanner[:before].length\n\n new_lines = @scanner[:new_line].delete(\"\\r\")\n unless new_lines.empty?\n @lineno += new_lines.length\n @column = 0\n end\n\n @column += @scanner[:after].length\n end\n\n token =\n case\n when try_match(REFERENCE_PATTERN)\n Token.new :REFERENCE, @scanner[:identifier], @lineno, @column\n when try_match(PATH_PATTERN)\n Token.new :PATH, @scanner[:identifier], @lineno, @column\n when try_match(FILTER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :FILTER, \"?\", @lineno, @column\n when try_match(OPEN_BRACKET_PATTERN)\n @state_stack.push Token.new :OPEN_BRACKET, \"[\", @lineno, @column\n @state_stack.last\n when try_match(OPEN_PAREN_PATTERN)\n @state_stack.push Token.new :OPEN_PAREN, \"(\", @lineno, @column\n @state_stack.last\n when try_match(CLOSE_BRACKET_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\"]\", @lineno, @column)\n end\n unless last.type == :OPEN_BRACKET\n raise TokenizeError.unbalanced(\"[\", last.lineno, last.column)\n end\n Token.new :CLOSE_BRACKET, \"]\", @lineno, @column\n when try_match(CLOSE_PAREN_PATTERN)\n last = @state_stack.pop\n unless last\n raise TokenizeError.unexpected(\")\", @lineno, @column)\n end\n unless last.type == :OPEN_PAREN\n raise TokenizeError.unbalanced(\"(\", last.lineno, last.column)\n end\n Token.new :CLOSE_PAREN, \")\", @lineno, @column\n when try_match(SELF_PATTERN)\n Token.new :SELF, \"@\", @lineno, @column\n when try_match(NUMBER_PATTERN)\n Token.new :NUMBER, BigDecimal.new(@last_captured), @lineno, @column\n when try_match(STRING_PATTERN)\n Token.new :STRING, @scanner[:str], @lineno, @column\n when try_match(TRUE_PATTERN)\n Token.new :BOOLEAN, true, @lineno, @column\n when try_match(FALSE_PATTERN)\n Token.new :BOOLEAN, false, @lineno, @column\n when try_match(COLON_PATTERN)\n Token.new :COLON, \":\", @lineno, @column\n when try_match(COMMA_PATTERN)\n Token.new :COMMA, \",\", @lineno, @column\n when try_match(ADD_PATTERN)\n Token.new :ADD, \"+\", @lineno, @column\n when 
try_match(SUBTRACT_PATTERN)\n case @tokens.last&.type\n when nil, :OPEN_PAREN, :OPEN_BRACKET, :COMMA, :COLON, :POW, :MOD, :ADD, :SUBTRACT, :MULTIPLY, :DIVIDE\n if @scanner.check(NUMBER_PATTERN) ||\n @scanner.check(REFERENCE_PATTERN) ||\n @scanner.check(SUBTRACT_PATTERN) ||\n @scanner.check(OPEN_PAREN_PATTERN)\n Token.new :UMINUS, \"-\", @lineno, @column\n else\n raise TokenizeError.unexpected(\"-\", @lineno, @column)\n end\n else\n Token.new :SUBTRACT, \"-\", @lineno, @column\n end\n when try_match(MULTIPLY_PATTERN)\n Token.new :MULTIPLY, \"*\", @lineno, @column\n when try_match(DIVIDE_PATTERN)\n Token.new :DIVIDE, \"/\", @lineno, @column\n when try_match(POW_PATTERN)\n Token.new :POW, \"^\", @lineno, @column\n when try_match(MOD_PATTERN)\n Token.new :MOD, \"%\", @lineno, @column\n when try_match(EQUAL_TO_PATTERN)\n Token.new :EQUAL_TO, \"==\", @lineno, @column\n when try_match(NOT_EQUAL_TO_PATTERN)\n Token.new :NOT_EQUAL_TO, \"!=\", @lineno, @column\n when try_match(GREATER_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :GREATER_THAN_OR_EQUAL_TO, \">=\", @lineno, @column\n when try_match(GREATER_THAN_PATTERN)\n Token.new :GREATER_THAN, \">\", @lineno, @column\n when try_match(LESS_THAN_OR_EQUAL_TO_PATTERN)\n Token.new :LESS_THAN_OR_EQUAL_TO, \"<=\", @lineno, @column\n when try_match(LESS_THAN_PATTERN)\n Token.new :LESS_THAN, \"<\", @lineno, @column\n when try_match(AND_PATTERN)\n Token.new :AND, \"&&\", @lineno, @column\n when try_match(OR_PATTERN)\n Token.new :OR, \"||\", @lineno, @column\n when try_match(NOT_PATTERN)\n Token.new :NOT, \"!\", @lineno, @column\n when try_match(INTERSECT_PATTERN)\n Token.new :INTERSECT, \"&\", @lineno, @column\n when try_match(UNION_PATTERN)\n Token.new :UNION, \"|\", @lineno, @column\n when try_match(IDENTIFIER_PATTERN) && @scanner.check(OPEN_PAREN_PATTERN)\n unless @scanner.check(OPEN_PAREN_PATTERN)\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n Token.new :FUNCTION, @last_captured, @lineno, @column\n else\n raise TokenizeError.unexpected(@scanner.peek(7), @lineno, @column)\n end\n\n @column += @last_captured.length\n @tokens << token\n\n token\n end" ]
[ "0.6587464", "0.6517174", "0.6437756", "0.640805", "0.63313323", "0.62394625", "0.6230809", "0.6162557", "0.61176914", "0.6086427", "0.6084447", "0.60331476", "0.6032339", "0.6005101", "0.5995011", "0.5986573", "0.596949", "0.596949", "0.596949", "0.5933468", "0.5915246", "0.5905661", "0.5903815", "0.58884794", "0.588634", "0.588634", "0.588634", "0.5817665", "0.5773458", "0.57713896", "0.5758676", "0.5748697", "0.5746873", "0.5737846", "0.5732671", "0.56790406", "0.56708676", "0.567049", "0.5667914", "0.564501", "0.56440294", "0.56438667", "0.5643011", "0.5623165", "0.5615541", "0.5615541", "0.5615541", "0.56050265", "0.5598654", "0.5578708", "0.5574778", "0.55654776", "0.5542215", "0.5542215", "0.5542215", "0.5542215", "0.55324936", "0.5527652", "0.5527652", "0.5527652", "0.5527652", "0.5520846", "0.55096656", "0.55065906", "0.55061936", "0.550464", "0.54959357", "0.54867685", "0.5484406", "0.54721296", "0.5471877", "0.54618186", "0.5461541", "0.5461541", "0.5461541", "0.5457239", "0.54487383", "0.5438274", "0.5438274", "0.5438274", "0.5437557", "0.5437108", "0.5435805", "0.54293483", "0.5416682", "0.5416682", "0.5416682", "0.54051393", "0.5399198", "0.5399198", "0.5396316", "0.53908646", "0.5387748", "0.537296", "0.53729135", "0.5371593", "0.5361546", "0.5358537", "0.53548634", "0.53516215", "0.5346582" ]
0.0
-1
break => position is rewind, then break with
return => position is rewind, then return
next => position is not rewind, then break with
def eval_lit(&block)
  begin
    saved_pos = @pos
    @eval_nest += 1
    ret = yield
  ensure
    @eval_nest -= 1
    if ret.nil?
      @pos = saved_pos
      nil
    else
      if @eval_nest == 0
        #STDERR.puts "clear_cache [#{saved_pos}..#{@pos}]"
        clear_cache
      end
    end
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next\n last? ? nil : locate + 1\n end", "def next\n at(position + 1)\n end", "def next?()\n !end?\n end", "def next(pointer); end", "def next(pointer); end", "def next\n peek.tap { @position += 1 }\n end", "def seek\n while @next < @n && @ptns[@next].size == @nums[@next]\n @next += 1\n end\n @next < @n # return true/false\n end", "def rewind() end", "def rewind() end", "def rewind() end", "def rewind() end", "def next!() end", "def advance\n @current += 1 unless at_end?\n return previous\n end", "def move \n while true \n choose_next_position \n if [:found, :fail].include? @result\n break \n end\n end\n end", "def next_starting_position\n\t\tpos = @starting_positions[@pos_idx]\n\t\t@pos_idx = (@pos_idx + 1) % @starting_positions.length\n\t\tpos\n\tend", "def next() end", "def next() end", "def advance; end", "def advance; end", "def advance; end", "def has_next?; end", "def each\n while true do\n yield\n break if ! advance\n end\n end", "def tell; @next_block_pos end", "def moveto_next\n return if @range.empty?\n if index = @range.index(self.pos)\n @index = index\n end\n @index += 1\n @index %= @range.size\n moveto @range[@index]\n end", "def next()\n\n @filehandler.pos = @list_of_positions[@current_iteration]\n\n if @list_of_positions.length > @current_iteration\n @current_iteration += 1\n end\n\n make_entry()\n\n end", "def next()\n result = current\n @index += 1\n @got_next_element = false\n @next_element = nil\n result\n end", "def next_direction #:nodoc:\n loop do\n direction = @tries.pop\n nx, ny = @maze.move(@x, @y, direction)\n\n if @maze.valid?(nx, ny) && (@maze[@x, @y] & (direction | (direction << Maze::UNDER_SHIFT)) == 0)\n if @maze[nx, ny] == 0\n return direction\n elsif [email protected]?(@maze[nx, ny]) && @maze.weave > 0 && rand(100) < @maze.weave\n # see if we can weave over/under the cell at (nx,ny)\n return direction if @maze.weave_allowed?(@x, @y, nx, ny, direction)\n end\n end\n\n while @tries.empty?\n if @stack.empty?\n @pending = false\n return nil\n else\n @x, @y, @tries = @stack.pop\n end\n end\n end\n end", "def next\n next? ? @current + 1 : nil\n end", "def next\n self.offset(1)\n end", "def rewind; end", "def rewind; end", "def rewind; end", "def next\n last? ? 
nil : @collection[index + 1]\n end", "def iterator?() end", "def rewind\n @pos = 0\n end", "def rewind\n @pos = 0\n end", "def advance\n return if @current or @hidden\n @current_pos += 1.5\n if @current_pos >= @lines.last.length then\n @current = true\n end\n end", "def test_goto_position_backwards\n @buffer = Buffer.new 'hellow world'\n @buffer.fin\n @buffer.goto_position 5\n assert_eq @buffer.position, 5\n assert_eq @buffer.at, 'w'\n end", "def go_to_next(base_ary, position)\n ary = (base_ary + [position]).sort.reverse\n idx = ary.index(position)\n base_ary.reverse[idx]\n end", "def rewind\n @cursor = 0\n end", "def rewind\n @pos = @startpos\n @eof = false\n end", "def rewind; begin!; self end", "def next_match char\n data = get_content\n row = focussed_index + 1\n row.upto(data.length-1) do |ix|\n val = data[ix].chomp rescue return # 2010-01-05 15:28 crashed on trueclass\n #if val[0,1] == char #and val != currval\n if val[0,1].casecmp(char) == 0 #AND VAL != CURRval\n return ix\n end\n end\n row = focussed_index - 1\n 0.upto(row) do |ix|\n val = data[ix].chomp\n #if val[0,1] == char #and val != currval\n if val[0,1].casecmp(char) == 0 #and val != currval\n return ix\n end\n end\n return -1\n end", "def next\n\t\t@ibuf = (@ibuf+1).modulo(@nbuf)\n\t\t@buffers[@ibuf]\n\tend", "def intend_next\n intend_with :first, :next\n end", "def intend_next\n intend_with :first, :next\n end", "def return_next_match binding, *args\n first = nil\n ix = 0\n @view.each_with_index do |elem, ii|\n next unless binding.call(elem, *args)\n\n first ||= ii\n if ii > @cursor\n ix = ii\n break\n end\n end\n return first if ix == 0\n\n ix\nend", "def rewind()\n #This is a stub, used for indexing\n end", "def peek_next()\n return nil if @at_end\n\n begin\n @reader.peek\n rescue StopIteration\n nil\n end\n end", "def find_beeper()\n while not next_to_a_beeper?()\n move_toward_beeper()\n end\n end", "def next!\n rotate!.first\n end", "def _skip_next\n @index += 1\n\n self\n end", "def reverse_each_from_current_pos\n return to_enum(:reverse_each_from_current_pos) unless block_given?\n\n # read the rest of the current line, in case we started in the middle of a line\n start_pos = pos\n fragment = readline rescue \"\"\n seek(start_pos)\n\n while data = reverse_read(4096)\n lines = data.each_line.to_a\n lines.last << fragment unless lines.last[-1] == \"\\n\"\n\n fragment = lines.first\n\n lines[1..-1].reverse_each { |line| yield line }\n end\n\n yield fragment\n end", "def next(current)\n if (@position < @history.length - 1)\n @position = @position + 1\n return @history[@position]\n end\n return current\n end", "def read_next()\n return nil if @at_end\n\n begin\n pos = @marker.position\n\n if @marker.character == ?\\n\n pos.line += 1\n pos.column = 0\n end\n\n @marker.character = @reader.next\n @marker.source_index += 1\n pos.column += 1\n rescue StopIteration\n @at_end = true\n @marker.character = nil\n end\n\n @marker.character\n end", "def next_skip_line_number\n first = self.next\n first.type == :integer ? self.next : first\n end", "def run( current, positions, iterations, num_values )\n iterations.times do \n slice_start = current.next\n current.next = current.next.next.next.next\n destination = nil\n backoff = 1\n \n while ! 
destination \n search_for_label = current.value - backoff\n if search_for_label < 1\n search_for_label += num_values\n end \n destination = positions[search_for_label]\n if destination.value == slice_start.value ||\n destination.value == slice_start.next.value ||\n destination.value == slice_start.next.next.value \n destination = nil\n backoff += 1\n end\n end\n \n after_slice = destination.next\n destination.next = slice_start\n slice_start.next.next.next = after_slice\n current = current.next\n end\nend", "def next_offset\n next_offset = offset + limit\n return nil if next_offset >= total\n\n next_offset\n end", "def next\n\n if (@local_iterator && @local_iterator.has_next?)\n @local_iterator.get_next\n else\n nil\n end\n\n end", "def fwd_after_peek\n @pos = @head_pos\n end", "def goto_next\n pointer, matching_pointers = @itr_pointer.next(@builder['skip_matched_checkbutton'].active?, true)\n if (pointer == nil)\n if @single\n self.destroy\n else\n @itr_pointer.reset\n pointer, matching_pointers = @itr_pointer.next(@builder['skip_matched_checkbutton'].active?, true)\n if (pointer == nil)\n notify('No more items to compare')\n else\n @idx_comparison = 0\n notify('Cycling items to compare from the beginning')\n end\n end\n else\n @idx_comparison += 1\n notify('')\n end\n load_comparison(pointer, matching_pointers)\n end", "def next!\r\n @cur = @cache[@idx+=1]\r\n end", "def _find_next regex=@last_regex, start = @search_found_ix \n raise \"No previous search\" if regex.nil?\n #$log.debug \" _find_next #{@search_found_ix} : #{@current_index}\"\n fend = @list.size-1\n if start != fend\n start += 1 unless start == fend\n @last_regex = regex\n @search_start_ix = start\n regex = Regexp.new(regex, Regexp::IGNORECASE) if @search_case\n start.upto(fend) do |ix| \n row = @list[ix]\n m=row.match(regex)\n if !m.nil?\n @find_offset = m.offset(0)[0]\n @find_offset1 = m.offset(0)[1]\n @search_found_ix = ix\n return ix \n end\n end\n end\n fend = start-1\n start = 0\n if @search_wrap\n start.upto(fend) do |ix| \n row = @list[ix]\n m=row.match(regex)\n if !m.nil?\n @find_offset = m.offset(0)[0]\n @find_offset1 = m.offset(0)[1]\n @search_found_ix = ix\n return ix \n end\n end\n end\n return nil\n end", "def goto_end\n $multiplier ||= 0\n if $multiplier > 0\n goto_line $multiplier - 1\n return\n end\n @current_index = @list.count() - 1\n @prow = @current_index - @scrollatrows\n $multiplier = 0\n end", "def next\n\t\tlines.shift\n\tend", "def return_next_match binding, *args\n first = nil\n ix = 0\n $view.each_with_index do |elem,ii|\n if binding.call(elem, *args)\n first ||= ii\n if ii > $cursor \n ix = ii\n break\n end\n end\n end\n return first if ix == 0\n return ix\nend", "def return_next_match binding, *args\n first = nil\n ix = 0\n $view.each_with_index do |elem,ii|\n if binding.call(elem, *args)\n first ||= ii\n if ii > $cursor \n ix = ii\n break\n end\n end\n end\n return first if ix == 0\n return ix\nend", "def rewind\n end", "def rewind\n end", "def rewind\n end", "def rewind\n end", "def next\n if self.last?\n first_in_section\n else lower_item\n end\n end", "def next_hack(n)\n return n.next if hack? n.next\n next_hack n.next\nend", "def almost_rewind(f)\n f.seek(2)\nend", "def rewind\n @pos = 0\n self\n end", "def next_is? &block\n last? ? 
nil : yield(element, self.next)\n end", "def get_next_entry; end", "def each\n position = (0..@max_size - 1)\n\n loop do\n match = @mapper.call(@buffer[position])\n\n if match.nil?\n position = (position.begin..position.end - 1)\n position = slide_unmatched_window(position) if position.end < position.begin\n else\n yield match\n position = slide_matched_window(position)\n end\n\n break if position.first >= @buffer.size\n end\n end", "def move_next\n\t\tself.current = self.current&.next\n\tend", "def move_next\n\t\tself.current = self.current&.next\n\tend", "def next_element\n validates_possibility_of :next\n self.index += 1\n self.current\n end", "def seek_landmarks(tokenstream)\n @landmarks.each do |landmark|\n unless tokenstream.skip_to(*landmark)\n return nil\n end\n end\n return tokenstream.cur_pos\n end", "def test_next_statement\n i = 0\n result = []\n while i < 10\n i += 1\n next if (i % 2) == 0\n result << i\n end\n assert_equal (1..10).step(2).to_a, result\n end", "def next_offset\n [all_contents.size + 1, offset + limit].min\n end", "def rewind\n end", "def current; peek(0) end", "def rewind\n @offset = 0\n end", "def move_to_next_line()\r\n while @seek_ptr < @len && @fileBuf.at(@seek_ptr) != \"\\n\"\r\n @seek_ptr = @seek_ptr + 1\r\n end\r\n end", "def next_match char\n data = get_content\n row = focussed_index\n currval = data[row].chomp\n row.upto(data.length-1) do |ix|\n val = data[ix].chomp\n if val[0,1] == char and val != currval\n return ix\n end\n end\n 0.upto(row) do |ix|\n val = data[ix].chomp\n if val[0,1] == char and val != currval\n return ix\n end\n end\n return -1\n end", "def goto_last_position\n return unless @oldrow\n @current_index = @oldrow\n bounds_check\n end", "def find_next_in(parent, index)\n loop do\n while (c = parent.blocks[index])\n return c if c.context == :section && c.level <= @chunk_level\n\n index += 1\n end\n return unless parent.parent\n return unless (index = parent.parent.blocks&.find_index parent)\n\n parent = parent.parent\n index += 1\n end\n end", "def count_next_index()\n if @operation == :pour\n if @second_index + 1 >= @original_state.size\n @second_index = 0\n @first_index += 1\n else\n @second_index += @second_index + 1 == @first_index ? 2 : 1\n end\n if @first_index + 1 == @original_state.size && @second_index + 1 >= @original_state.size\n @operation = :nop\n end\n else\n if @first_index + 1 < @original_state.size\n @first_index += 1\n else\n @first_index = 0\n @operation = @operation == :fill ? :empty : :pour\n end\n end\n end", "def starting_position; end", "def has_next()\n \n end", "def has_next()\n \n end", "def next!\n return nil if @players.empty?\n \n begin\n @players[@next % @players.size]\n ensure\n skip! 
# shift the next pointer\n end\n end", "def next_match char\n start = @current_index\n start.upto(@list.length-1) do |ix|\n if @list[ix][0,1].casecmp(char) == 0\n return @list[ix] unless @list[ix] == @buffer\n end\n @current_index += 1\n end\n ## could not find, start from zero\n @current_index = 0\n start = [@list.length()-1, start].min\n 0.upto(start) do |ix|\n if @list[ix][0,1].casecmp(char) == 0\n return @list[ix] unless @list[ix] == @buffer\n end\n @current_index += 1\n end\n @current_index = [@list.length()-1, @current_index].min\n return nil\n end", "def next_match char\n start = @current_index\n start.upto(@list.length-1) do |ix|\n if @list[ix][0,1].casecmp(char) == 0\n return @list[ix] unless @list[ix] == @buffer\n end\n @current_index += 1\n end\n ## could not find, start from zero\n @current_index = 0\n start = [@list.length()-1, start].min\n 0.upto(start) do |ix|\n if @list[ix][0,1].casecmp(char) == 0\n return @list[ix] unless @list[ix] == @buffer\n end\n @current_index += 1\n end\n @current_index = [@list.length()-1, @current_index].min\n return nil\n end", "def rewind\n @list_index = 1\n @co_index = 1\n \n nil\n end", "def move_to_first_open_position\n valid_positions.shuffle.each do |coordinate_array|\n if maze[coordinate_array[0]][coordinate_array[1]] == open_value\n if @maze[leading_x][leading_y] == open_value\n @maze[leading_x][leading_y] = path_value\n elsif\n @maze[leading_x][leading_y] = visited_value\n end\n @leading_x = coordinate_array[0]\n @leading_y = coordinate_array[1]\n return\n end\n end\n valid_positions.shuffle.each do |coordinate_array|\n if maze[coordinate_array[0]][coordinate_array[1]] == path_value\n @maze[leading_x][leading_y] = visited_value\n @leading_x = coordinate_array[0]\n @leading_y = coordinate_array[1]\n break\n end\n end\n end", "def next(pointer)\n super\n \n raise 'Not implemented'\n end" ]
[ "0.64874", "0.63333476", "0.63201773", "0.626301", "0.626301", "0.6254629", "0.6173073", "0.6171248", "0.6171248", "0.6171248", "0.6171248", "0.6102457", "0.6062894", "0.60593617", "0.604659", "0.6034351", "0.6034351", "0.59989417", "0.59989417", "0.59989417", "0.59680724", "0.5938369", "0.59288025", "0.59220856", "0.59093684", "0.5881777", "0.58785665", "0.5869733", "0.58648086", "0.5863157", "0.5863157", "0.5863157", "0.5858482", "0.58517885", "0.58399034", "0.58399034", "0.58243126", "0.581632", "0.5807736", "0.5804666", "0.5800697", "0.5795618", "0.57785195", "0.57568216", "0.5740958", "0.5740958", "0.5739011", "0.5729338", "0.5728081", "0.57199603", "0.5696548", "0.5692039", "0.56823796", "0.56736344", "0.56732583", "0.56716454", "0.56700665", "0.5668742", "0.5655717", "0.56518716", "0.564888", "0.5643922", "0.5634176", "0.5622802", "0.56210333", "0.56146944", "0.56146944", "0.56132764", "0.56132764", "0.56132764", "0.56132764", "0.5613114", "0.5603913", "0.5598839", "0.55983084", "0.5596062", "0.5594284", "0.5590982", "0.55897546", "0.55897546", "0.55895054", "0.5588259", "0.5580424", "0.55796516", "0.55756223", "0.5561946", "0.5560388", "0.55306894", "0.5526838", "0.55259925", "0.55231184", "0.5517873", "0.55145884", "0.5514388", "0.5514388", "0.55075055", "0.54859", "0.54859", "0.54811704", "0.54807776", "0.5467955" ]
0.0
-1
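The query/document pair above is the backtracking core of the lexer: eval_lit snapshots the scan position (@pos) before running the block, rewinds to the snapshot whenever the block yields nil (a failed attempt), and clears the memoization cache once the outermost nested evaluation returns. What follows is a minimal, self-contained sketch of that rewind-on-failure pattern; the Scanner class and the attempt name are illustrative only and are not part of the dataset:

class Scanner
  def initialize
    @pos = 0
  end

  # Run the block as a speculative parse attempt. A nil result means the
  # attempt failed, so the scan position is rewound to the snapshot; any
  # non-nil result keeps whatever position the block advanced to.
  def attempt
    saved_pos = @pos
    ret = yield
    @pos = saved_pos if ret.nil?
    ret
  end
end

The nesting counter in the original (@eval_nest) exists only so the cache is cleared once, at the outermost attempt, rather than on every nested rewind.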
position to [row, col]
def row_col(pos)
  _pos = 0
  row = 0
  col = 1
  @codes.each do |code|
    break if _pos >= pos
    if line_terminator?(code)
      row += 1
      col = 0
    else
      col += 1
    end
    _pos += 1
  end
  return [row+1, col+1]
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_position(row, col)\n if row.between?(0, 7) && col.between?(0, 7)\n @position[:row] = row\n\t @position[:col] = col\n\tend\n end", "def position\n [ @row_offset, @col_offset ]\n end", "def position\n [row.position, column.position]\n end", "def move row, col\n @row = row\n @col = col\n end", "def position\n [ChessBoard.col_index(pos[0]), pos[1].to_i]\n end", "def move_to(row, col)\n @row, @col = wrap(row, col)\n end", "def coord_to_pos(row,col)\n return nil if row.even? && col.even? || row.odd? && col.odd? #these are always empty squares in checkers!\n return row*4+(col/2).floor+1\n end", "def pos=(val)\n #setter method for position \n row, col = val\n @pos = val\n\n end", "def position (r,c)\n if (0...ROWS).member? r\n if (0...COLS).member? c\n {row: r, col: c, pos: COLS*r + c}\n end\n end\nend", "def to_row_col(pos)\n\t row = (pos - 1) / @cols\n\t row = ((0...@rows).include? row) ? row : nil \n\t col = (pos - 1) % @cols\n\t col = ((0...@cols).include? col) ? col : nil\n\t [row, col]\n\tend", "def position(pos)\n @cells[to_index(pos)]\n end", "def image_position\n (@row * 6) + @column\n end", "def place position\n x,y = position\n\n leftmost = most(position, Left)\n rightmost= most(position, Right)\n topmost= most(position, Top)\n bottommost= most(position, Bottom)\n\n (leftmost[0]..rightmost[0]).each {|x| @horizontal.delete [x,y]}\n (topmost[1]..bottommost[1]).each {|y| @vertical.delete [x,y]}\n\n @horizontal[leftmost] = rightmost[0]\n @vertical[topmost] = bottommost[1] \n end", "def line_and_column(pos); end", "def setpos r=@row, c=@col\n #$log.debug \"setpos : (#{self.name}) #{r} #{c} XXX\"\n ## adding just in case things are going out of bounds of a parent and no cursor to be shown\n return if r.nil? or c.nil? # added 2009-12-29 23:28 BUFFERED\n return if r<0 or c<0 # added 2010-01-02 18:49 stack too deep coming if goes above screen\n @window.wmove r,c\n end", "def [](pos)\n row = pos.first\n col = pos.last\n @grid[row][col]\n end", "def position\n [@x, @y]\n end", "def go_to(rows,cols,face=nil)\n \tface = 'north' if (rows == 0 && cols == 0)\n @position.change ({:x => rows, :y => cols, :face => face.to_sym}) if face\n end", "def rowcol\n return self.row+@row_offset, self.col+@col_offset\n end", "def column_for_position(position); end", "def column_for_position(position); end", "def column_for_position(position); end", "def move_by(row_delta, col_delta)\n @row, @col = wrap(@row + row_delta, @col + col_delta)\n end", "def position\n [x, y]\n end", "def position(location)\n @cells[location.to_i - 1]\n end", "def position(location)\n @cells[location.to_i - 1]\n end", "def get_coordinates(pos)\n row = (pos / @width) + 1\n col = (pos % @width) + 1\n [row, col]\n end", "def[]=(pos, value)\n row = pos.first\n col = pos.last\n @grid[row][col] = value\n end", "def position(row, column)\n\t\tcolmn = get_column(column)\n\t\treturn colmn[row] unless colmn.nil?\n\tend", "def coord2pos(x, y)\n (y % h)*w+(x % w)\n end", "def set_cursor_position(row, col)\n\t\tinvalid_rows = [@cursor_row, row]\n\t\t@cursor_row, @cursor_col = row, col\n\t\tif @cursor_row < first_line_in_view\n\t\t\tset_contents_pos(0, line_num_to_coord(@cursor_row))\n\t\t\temit_changed(nil)\n\t\telsif @cursor_row > last_line_in_view\n\t\t\tset_contents_pos(0, line_num_to_coord(@cursor_row - num_lines_in_view))\n\t\t\temit_changed(nil)\n\t\tend\n\tend", "def [](pos)\n row, col = pos \n @grid[row][col]\n end", "def set_current_row_col(array)\r\n current_s_index = array.locate2d(' ')\r\n @current_row = 
current_s_index.flatten.first\r\n @current_col = current_s_index.flatten.last\r\n end", "def cal_pos\n x, y = map_location(@grid_x, @grid_y)\n x += @tile_size/2\n y += @tile_size/2\n [x,y]\n end", "def position\n\t\t[ @x, @y ]\n\tend", "def [](pos)\n row, col = pos\n @grid[row][col]\n end", "def [](pos)\n row, col = pos\n @grid[row][col]\n end", "def position(input)\n\t\tcells[input.to_i - 1]\n\tend", "def position(idx)\n pos = []\n\n dim.times do |i|\n if i < dim - 1\n row_length = @dimensions.drop(i+1).reduce(&:*)\n pos << idx / row_length\n idx -= pos.last * row_length\n else\n pos << idx\n end\n end\n\n pos\n end", "def moveto *args\n row,col = *({ :row => nil, :col => nil}.merge(args.first).values rescue args)\n cmd = (col and row) ? :cup : (col ? :hpa : (row ? :vpa : :home))\n __send__(cmd, [row, col].compact)\n end", "def xy_to_spot(x, y)\n spot = x + (y * @columns_size)\n end", "def position\n V[x, y]\n end", "def ensure_cursor_visible\n self.top_col = index if index < top_col\n self.bottom_col = index if index > bottom_col\n end", "def ensure_cursor_visible\n self.top_col = index if index < top_col\n self.bottom_col = index if index > bottom_col\n end", "def ensure_cursor_visible\n self.top_col = index if index < top_col\n self.bottom_col = index if index > bottom_col\n end", "def ensure_cursor_visible\r\n self.top_col = index if index < top_col\r\n self.bottom_col = index if index > bottom_col\r\n end", "def position(input)\n cells[self.idx(input)]\n end", "def [](pos)\n row, col = pos\n grid[row][col]\n end", "def [](pos)\n row, col = pos\n grid[row][col]\n end", "def position_coordinates(character)\n which_row = []\n which_cell = []\n (0...@n).each { |i| prepare_set(i, character, which_row, which_cell) }\n [which_row, which_cell]\n end", "def index_of_position(row, col = 0)\n\t\tline_index(row) + col + 1\n\tend", "def position(input)\n @cells[input.to_i - 1]\n end", "def rotate_cordinate_space\n Location.new(9 - @row, 9 - @col)\n end", "def position(input)\n self.cells[self.input_to_index(input)]\n end", "def c_bottomleft\n print cursor.column(0)\n print cursor.row(height)\n end", "def []=(pos, value)\n row, col = pos\n grid[row][col] = value\n end", "def [](pos)\n r, c = pos[0],pos[1]\n grid[r][c]\n end", "def set_starting_position\n start_rows = find_possible_centers(@matrix.length)\n start_columns = find_possible_centers(@matrix[0].length)\n determine_center_position(start_rows, start_columns)\nend", "def translate_index_to_grid_position(index)\n @grid_positions[index]\nend", "def place_piece piece, position\n @board[@@ROWS.index(position[0])][@@COLUMNS.index(position[1])] = piece\n end", "def position(input)\n self.cells[input.to_i - 1]\n end", "def position(input)\n @cells[input.to_i-1]\n end", "def [](pos)\n @grid[pos[0]][pos[1]]\n # row,col = pos\n # @grid[row][col]\n end", "def [](pos)\n if valid_pos?(pos)\n row, col = pos\n @grid[row][col]\n else\n puts \"Invalid position\"\n end\n end", "def location(x, y, w, h)\n return x - w/2, y + 20\n end", "def [](pos)\n x, y = pos.first, pos[1]\n @grid[x][y]\n end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def c_topleft\n print cursor.column(0)\n print cursor.row(0)\n end", "def position(input)\n cells[input.to_i - 1]\n end", "def line_and_column(position = T.unsafe(nil)); end", "def get_index(position:)\n row = (position - 1) / 3\n col = (position - 1) % 3\n return row, col\n end", "def pos\n 
@layout[@pos - 1]\n end", "def []=(pos, figure)\n x, y = pos\n @rows[x][y] = figure\n end", "def [](pos) #pos = [1,2]\n # x, y = pos\n x = pos[0]\n y = pos[1]\n @rows[x][y] \n end", "def from row, col\n rect(col, row)\n end", "def pos\n [posx, posy]\n end", "def index(x, y)\n (y - 1) * width + (x - 1)\n end", "def position(input)\n cells[input.to_i-1]\n end", "def position(input)\n cells[input.to_i-1]\n end", "def position \n\t\treturn @y,@x\n\tend", "def [](row, col)\n @move[[row, col]]\n end", "def []=(pos, p)\n r, c = pos[0],pos[1]\n grid[r][c] = p\n end", "def index_line_and_col(index) \n\t\ti = 0\n\t\ti += 1 while index_of_position(i) <= index\n\t\ti -= 1\n\t\t[i, index - index_of_position(i)]\n\tend", "def [](row, col)\n #convert row values by * by 9\n @grid[row * 9 + col]\n end", "def set_screen_row_col top, left=-1\n @top = top\n @left = left unless left < 0\n end", "def svg_coord(row, col)\n\t\t[col*10 + 5, row*10 + 5]\n\tend", "def [](pos)\n row, col = pos[0], pos[1]\n @rows[row][col]\n end", "def set_cell(args)\n arr = @rows.flatten\n arr[arr.find_index(args[:position])] = args[:piece]\n @rows = arr.enum_for(:each_slice, 3).to_a\n end", "def next_position\n return unless placed?\n axis = case direction\n when 'east', 'west'; :x\n when 'north', 'south'; :y\n end\n amount = case direction\n when 'north', 'east'; +1\n when 'south', 'west'; -1\n end\n [@x + (axis == :x ? amount : 0), @y + (axis == :y ? amount : 0)]\n end", "def [](pos)\n row, col = pos\n @rows[row][col]\n end", "def [](pos)\n x, y = pos\n @rows[x][y]\n end", "def index_for(x, y, coordinate_system=:row_col)\n case coordinate_system\n when :row_col\n x * 9 + y\n when :col_row\n y * 9 + x\n when :box\n [0,3,6,27,30,33,54,57,60][x] + [0,1,2,9,10,11,18,19,20][y]\n end\n end", "def move_to(pos)\n if (@current_pos != nil && @current_pos == pos)\n return\n end\n if pos == 5\n self.x = (Graphics.width - self.width) / 2\n self.y = (Graphics.height - self.height) / 2\n end\n if [1, 2, 3].include?(pos)#bottom\n self.y = Graphics.height - self.height\n if @win_help != nil\n self.y -= @win_help.height\n end\n end\n if [1, 4, 7].include?(pos)#left\n self.x = 0\n end\n if [7, 8, 9].include?(pos)#top\n self.y = 0\n end\n if [3, 6, 9].include?(pos)#right\n self.x = Graphics.width - self.width\n end\n @current_pos = pos\n end", "def valid_position?(row, col)\n row.between?(0, 7) && col.between?(0, 7) ? true : false\n end" ]
[ "0.78684074", "0.7549625", "0.7248122", "0.7154973", "0.71535504", "0.7065209", "0.7061515", "0.7041118", "0.69991076", "0.6984821", "0.6839706", "0.67913014", "0.67176014", "0.6717497", "0.6716835", "0.6703385", "0.6648231", "0.6620229", "0.6580714", "0.65406114", "0.65406114", "0.65406114", "0.65342337", "0.64976555", "0.6471739", "0.6471739", "0.6466129", "0.6452217", "0.64417684", "0.64339375", "0.64212114", "0.6419838", "0.6415846", "0.64061534", "0.6398923", "0.6396761", "0.63885397", "0.63725924", "0.6366133", "0.63559854", "0.6355196", "0.63507456", "0.6321385", "0.6321385", "0.6321385", "0.6319858", "0.63096356", "0.6301185", "0.6301185", "0.6300693", "0.62915534", "0.628449", "0.6265511", "0.62625575", "0.62607086", "0.6259621", "0.6258895", "0.6257155", "0.6255857", "0.6246045", "0.62415826", "0.6234001", "0.6227886", "0.62057894", "0.62055725", "0.6205223", "0.6198553", "0.6198553", "0.6198553", "0.6198553", "0.6198553", "0.6198553", "0.6198553", "0.6198553", "0.6198479", "0.61979556", "0.6196362", "0.6196131", "0.61909163", "0.61861897", "0.6177591", "0.61572075", "0.61567926", "0.6151438", "0.61511487", "0.61511487", "0.6151071", "0.6150962", "0.61497796", "0.61384314", "0.6131698", "0.6124922", "0.61225307", "0.6122228", "0.6118095", "0.611706", "0.6111515", "0.61033297", "0.61010706", "0.6099072", "0.6098638" ]
0.0
-1
Returns string of input data around _pos_
def debug_str(pos = nil, row = 0, col = 0)
  if pos.nil?
    # original had `pos = @head_pos or @pos`, which parses as
    # `(pos = @head_pos) or @pos`; use || so @pos is a real fallback
    pos = @head_pos || @pos
  end
  t = ''
  if col >= 80
    t << @codes[(pos - 80)..(pos + 80)].pack("U*")
    col = 81
  else
    t << line(pos)
  end
  if col and col >= 1
    col = col - 1
  end
  t << "\n"
  t << (' ' * col) + "^"
  t
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text_pos(pos = @pos)\t\n\t\treturn to_text(pos[0]) + (pos[1] + 1).to_s\n\tend", "def to_offset(text, position); end", "def to_s\n \"#{position[0]} #{position[1]} #{@direction}\"\n end", "def pos() end", "def pos() end", "def pos() end", "def pos() end", "def pos_to_index(position)\n position[1] * 8 + position[0]\n end", "def position_for(char)\n return ''.freeze if char.position.y == @y\n\n @y = char.position.y\n char.position.to_s\n end", "def to_text(single_pos)\t\n\t\t\t\t\tcase single_pos\n\t\t\t\t\t\twhen 0\n\t \t\tsingle_pos =\"A\"\n\t \t\twhen 1\n\t \t\tsingle_pos =\"B\"\n\t \t\twhen 2\n\t \t\t\tsingle_pos =\"C\"\n\t \t\twhen 3\n\t \t\tsingle_pos =\"D\"\n\t \t\twhen 4\n\t \t\tsingle_pos =\"E\"\n\t \t\twhen 5\n\t \t\tsingle_pos =\"F\"\n\t \t\twhen 6\n\t \t\tsingle_pos =\"G\"\n\t \t\twhen 7\n\t \t\tsingle_pos =\"H\"\n\t \tend\n\t \t\t\treturn single_pos\n\tend", "def pos\n io.pos\n end", "def read_at (pos)\r\n @data[pos]\r\n end", "def line_offset(pos=pos)\n p = 0\n string.each_line do |line|\n len = line.length\n return (pos - p) if p + len >= pos\n p += len\n end\n 0\n end", "def to_s\n \"Position <#{@row}, #{@col}>\"\n end", "def pos; end", "def pos; end", "def pos; end", "def pos; end", "def pos; end", "def pos; end", "def get_pos(pos)\n build_pos_hash if @pos_hash.empty?\n @pos_hash[pos_to_sym(pos)]\n end", "def line_offset(pos=pos())\n p = 0\n string.each_line do |line|\n len = line.length\n return (pos - p) if p + len >= pos\n p += len\n end\n 0\n end", "def to_s\n @position.to_s\n end", "def get_coordinates(pos)\n row = (pos / @width) + 1\n col = (pos % @width) + 1\n [row, col]\n end", "def pos\n @pos\n end", "def position_address\n \"#{@data['SIDO']} #{@data['L_SIGUN_GU']} #{@data['L_DONG']} #{@data['GIBUN']}\"\n end", "def pos; @_io.pos; end", "def to_pos\n return \"(N/A|N/A)\" if @x.nil? || @y.nil?\n return \"(#{@x}|#{@y})\"\n end", "def text_position\n end", "def text_x(pos)\n unless @text_pos\n @text_pos = []\n 0.upto(13) do |i|\n @text_pos[i] = x + i * char_spacing * xdim + (i >= 7 ? 6 : (i >= 1 ? 2 : 0))*xdim\n end\n end\n @text_pos[pos]\n end", "def line_and_column(pos); end", "def pos\n end", "def pos\n end", "def pos\n end", "def get_new_position line, start_pos, keypad\n line.each_char.with_object(start_pos.dup) do |c, new_pos|\n new_pos[1] -= 1 if c == \"U\" && new_pos[1].positive? && !keypad[new_pos[1] - 1][new_pos[0]].nil?\n new_pos[1] += 1 if c == \"D\" && new_pos[1] < (keypad.length - 1) && !keypad[new_pos[1] + 1][new_pos[0]].nil?\n new_pos[0] -= 1 if c == \"L\" && new_pos[0].positive? 
&& !keypad[new_pos[1]][new_pos[0] - 1].nil?\n new_pos[0] += 1 if c == \"R\" && new_pos[0] < (keypad.length - 1) && !keypad[new_pos[1]][new_pos[0] + 1].nil?\n end\n end", "def line_for_position(position); end", "def line_for_position(position); end", "def line_for_position(position); end", "def position(index)\n return [-1, -1] unless index < string.length\n\n row_start = @line_starts.select { |line_start| line_start <= index }.last\n row = @line_starts.index(row_start)\n column = index - row_start\n [row, column]\n end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def position; end", "def text_position=(pos)\n end", "def get_string(offset)\n @contents.pos = offset\n return @contents.gets\n end", "def pos=(pos); end", "def getPos _args\n \"getPos _args;\" \n end", "def tell\n @pos\n end", "def position_string\n # Oddly, String#delete is non-destructive, unlike Array#delete\n @padded_board_string.delete('*').scan(/..../).reverse.join\n end", "def read(pos)\n end", "def position(pos)\n @cells[to_index(pos)]\n end", "def positioned_message msg\n result = [msg]\n result << \"in file #{file}\" if file\n result << \"at line #{line}:#{pos}\" if line\n result.join(\" \")\n end", "def position_string(position, subpath = nil)\n \"might_config[#{(position + Array(subpath).join('/').split('/')).join('][')}]\"\n end", "def pos\n [posx, posy]\n end", "def pos_header\n @position - 2\n end", "def pos=(_arg0); end", "def position\n\t\treturn \"#{@x} #{@y} #{PlanetModel::ORIENTATIONS.invert[@angle.abs % 360].to_s}#{if @lost == true then ' LOST' end}\"\t\n\tend", "def decompose_position(position); end", "def decompose_position(position); end", "def decompose_position(position); end", "def pos1(pos)\n [pos.first + 1, pos.last + 1]\n end", "def text_coordinate\n return 39, 5, 222, 16\n end", "def pos\n @read\n end", "def pos()\n #This is a stub, used for indexing\n end", "def at(pos)\n pos = pos.to_int\n\n if 0 <= pos && !has_size?\n return read_access(pos)\n elsif 0 <= pos && pos < size\n return read_access(pos)\n elsif -size <= pos && pos < 0\n return read_access(size + pos)\n else\n return nil\n end\n end", "def input_to_index(position)\nindex = position.strip.to_i - 1\nreturn index\nend", "def to position\n self[0..position]\n end", "def position(input)\n\t\tcells[input.to_i - 1]\n\tend", "def line_char_to_offset(text, line, character); end", "def position(input)\n cells[input.to_i-1]\n end", "def position(input)\n cells[input.to_i-1]\n end", "def position(input)\n @cells[input.to_i - 1]\n end", "def get_str(offset, len)\n @buf[offset..offset+len-1]\n end", "def position(input)\n @cells[input.to_i-1]\n end", "def to_s\n return '<unknown node position>' unless (from, size = refs_at)\n\n \"#{'^'.rjust(from, ' ')}#{'-'.rjust(size, '-')}^\"\n end", "def pos\n @position[:current]\n end", "def index(str, pos)\n case\n when @left.size <= pos\n @right.index(str, pos - @left.size)\n when (ret = @left.index(str, pos))\n ret\n else\n self.each_char_from(pos){|c, i|\n }\n # Search is done for <= @left[]\n left_s = kkk\n left_p = pos\n end\n end", "def position(input)\n cells[input.to_i - 1]\n end", "def to_s\n \"[#{@pos_x}, #{@pos_y}, #{@angle}]\"\n end", "def findcoordinates(input)\n case input\n when \"^\"\n position = [0,1]\n when \"v\"\n position = [0,-1]\n when \">\"\n position = [1,0]\n when \"<\"\n position = [-1,0]\n end\nend", "def position(input)\n self.cells[input.to_i - 1]\n end", "def pos_of_member dn\n 
Rails.cache.fetch(\"#{cn}/#{dn}/pos\") do\n uid = dn.rdns.first[\"uid\"]\n post = positions.find{|u| u.split(\";\")[1] == uid}\n return \"\" if post.nil?\n post.split(\";\").first\n end\n end", "def get_line_pos(pos)\n lpos = @line_ends.bsearch_index { |x, _| x >= pos }\n return lpos\n end", "def cstr\n r = @data.unpack(\"@#{pos}Z*\")[0]\n @pos += r.bytesize + 1\n r\n end", "def pos_on_line(offset)\n end", "def bytepos; end", "def update_position\n chr = Console.read_char\n\n offset = arrow_val(chr)\n if offset\n new_position = Coord.sum(position, offset)\n self.position = new_position if in_range?(new_position)\n end\n\n chr\n end", "def position(input)\n cell_index_taken = input.to_i-1\n cells[cell_index_taken]\n end", "def parse_position(content)\n return content.lstrip.rstrip.to_i\n end", "def aligned_text_position=(pos)\n end", "def begin_pos; end", "def input_to_index (pos)\n pos = ((pos.to_i) - 1)\nend", "def pos\n file.pos\n end", "def pos()\n #This is a stub, used for indexing\n end", "def pos()\n #This is a stub, used for indexing\n end", "def pos\n @pos || uninitialized!\n end" ]
[ "0.729527", "0.7034711", "0.65570784", "0.6426135", "0.6426135", "0.6426135", "0.6426135", "0.63888615", "0.63688344", "0.6288623", "0.62551737", "0.62439734", "0.62432516", "0.62406826", "0.6224327", "0.6224327", "0.6224327", "0.6224327", "0.6224327", "0.6224327", "0.6215989", "0.6171418", "0.6149291", "0.61173475", "0.60959655", "0.60819244", "0.6038717", "0.60201454", "0.5991038", "0.595531", "0.5935942", "0.5915696", "0.5915696", "0.5915696", "0.59029824", "0.5873627", "0.5873627", "0.5873627", "0.5870963", "0.5863084", "0.5863084", "0.5863084", "0.5863084", "0.5863084", "0.5863084", "0.5863084", "0.5863084", "0.58356494", "0.58353126", "0.5805288", "0.5804737", "0.57981086", "0.57915676", "0.57850295", "0.5778055", "0.5775516", "0.5752457", "0.57100034", "0.5707748", "0.5703114", "0.57026863", "0.5692025", "0.5692025", "0.5692025", "0.56900704", "0.5683764", "0.56787217", "0.567212", "0.5671361", "0.56701386", "0.56683695", "0.566334", "0.5657771", "0.56492907", "0.56492907", "0.56469995", "0.5641896", "0.5631449", "0.5621543", "0.560917", "0.56074333", "0.5599514", "0.5599412", "0.5598576", "0.55976343", "0.559715", "0.5595588", "0.5595017", "0.55944824", "0.5583132", "0.5570249", "0.5569861", "0.5558092", "0.5538738", "0.55383074", "0.5537386", "0.552897", "0.5524968", "0.5524968", "0.5512253" ]
0.60360533
27
Override Thread.new to prevent threads being created if there isn't runtime support for it
def new(*args, &block)
  unless defined? @_rubycocoa_threads_allowed
    # If the user has explicitly disabled thread support, also disable the
    # check (for debugging/testing only)
    @_rubycocoa_threads_allowed = ENV['RUBYCOCOA_THREAD_HOOK_DISABLE'] ||
                                  OSX::RBRuntime.isRubyThreadingSupported?
  end
  unless @_rubycocoa_threads_allowed
    warn "#{caller[0]}: Ruby threads cannot be used in RubyCocoa without patches to the Ruby interpreter"
  end
  pre_rubycocoa_new(*args, &block)
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allowing_other_threads; end", "def spawn_thread\n Thread.new{ run_thread }.tap do |t|\n t.abort_on_exception = true\n end\n end", "def start_thread #does this need to be its own thread?\n @@thread = Thread.new do\n self.run\n end\n end", "def do_not_thread; true; end", "def do_not_thread; true; end", "def do_not_thread; true; end", "def run(background = T.unsafe(nil), thread_name: T.unsafe(nil)); end", "def initialize(*args, &block)\n raise ArgumentError, 'block must be provided' unless block_given?\n args.push(block)\n\n thread_id = [0].pack('L')\n dwArgs = args.object_id\n \n @thread = CreateThread(\n nil, # Handle cannot be inherited\n 0, # Default stack size\n WinThreadFunc, # Pointer to thread func\n dwArgs, # Arguments passed to thread func \n 0, # Execute immediately, i.e. not suspended\n thread_id # Stores the thread id\n )\n \n if @thread == 0\n raise Error, get_last_error\n end\n \n @thread_id = thread_id.unpack('L')[0]\n end", "def do_not_thread\n true\n end", "def do_not_thread\n true\n end", "def in_new_thread; end", "def do_not_thread\n\n true\n end", "def _thread(release: false)\n Thread.new do\n yield\n rescue => e\n lex(e, 'Error in thread')\n nil\n ensure\n release_connection if release\n end\nrescue => e\n lex(e, 'Threading failed')\n nil\nend", "def initialize(obj, thread = :unbound)\n super(obj)\n __bind_to_thread(thread) if thread != :unbound\n end", "def do_not_thread\n true\n end", "def initialize(&blk)\n self.future_thread = Thread.new(&blk)\n end", "def method_missing(method_name, *args, &block)\n if @thread && @thread.respond_to?(method_name)\n @thread.__send__(method_name, *args, &block)\n else\n super(method_name, *args, &block)\n end\n end", "def create\n queue = Queue.new\n thread = Thread.new do\n while proc = queue.pop\n begin\n proc.call\n rescue ::Exception => ex\n Internals::Logger.crash(\"thread crashed\", ex)\n ensure\n put thread\n end\n end\n end\n\n thread[:celluloid_queue] = queue\n # @idle_threads << thread\n @group << thread\n thread\n end", "def create_remote_thread(h, start, arg)\r\n r = CALLS[\"kernel32!CreateRemoteThread:LLLLLLL=L\"].call(h, NULL, 0, start.to_i, arg.to_i, 0, 0)\r\n raise WinX.new(:create_remote_thread) if r == 0\r\n return r\r\n end", "def start_worker_thread\n @worker_threads << ControllableThread.new(@name + \"-worker\") {yield}\n end", "def new(&blk)\n memory = ::Libuv::Ext::LIBC.malloc(::Libuv::Ext.loop_size)\n ::Libuv::Ext.loop_init(memory)\n\n thread = create(memory)\n if block_given?\n ::Thread.new do\n thread.run &blk\n end\n end\n thread\n end", "def new\n @dl_thread = DlThread.new\n end", "def initialize_cleanup_thread(args = T.unsafe(nil)); end", "def initialize\n @threads = []\n end", "def threads\n raise CapabilitiesExceeded\n end", "def test_handles_multiple_threads\n threads = CW::Threads.new(self, [:a_thread, :sleep_thread])\n threads.start_threads\n assert threads.threads[0][:thread].is_a? Thread\n assert threads.threads[1][:thread].is_a? 
Thread\n assert_nil threads.threads[2]\n end", "def with_own_thread()\n true\n end", "def spawn(name, crit, *args, &block)\n\t\tt = nil\n\n\t\tif block\n\t\t\tt = ::Thread.new(name, crit, caller, block, *args) do |*argv|\n\t\t\t\t::Thread.current[:tm_name] = argv.shift.to_s\n\t\t\t\t::Thread.current[:tm_crit] = argv.shift\n\t\t\t\t::Thread.current[:tm_call] = argv.shift\n\t\t\t\t::Thread.current[:tm_time] = Time.now\n\n\t\t\t\tbegin\n\t\t\t\t\targv.shift.call(*argv)\n\t\t\t\trescue ::Exception => e\n\t\t\t\t\telog(\"thread exception: #{::Thread.current[:tm_name]} critical=#{::Thread.current[:tm_crit]} error:#{e.class} #{e} source:#{::Thread.current[:tm_call].inspect}\")\n\t\t\t\t\telog(\"Call Stack\\n#{e.backtrace.join(\"\\n\")}\")\n\t\t\t\t\traise e\n\t\t\t\tensure\n\t\t\t\t\tif framework.db and framework.db.active\n\t\t\t\t\t\t# NOTE: despite the Deprecation Warning's advice, this should *NOT*\n\t\t\t\t\t\t# be ActiveRecord::Base.connection.close which causes unrelated\n\t\t\t\t\t\t# threads to raise ActiveRecord::StatementInvalid exceptions at\n\t\t\t\t\t\t# some point in the future, presumably due to the pool manager\n\t\t\t\t\t\t# believing that the connection is still usable and handing it out\n\t\t\t\t\t\t# to another thread.\n\t\t\t\t\t\t::ActiveRecord::Base.connection_pool.release_connection\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\tend\n\t\telse\n\t\t\tt = ::Thread.new(name, crit, caller, *args) do |*argv|\n\t\t\t\t::Thread.current[:tm_name] = argv.shift\n\t\t\t\t::Thread.current[:tm_crit] = argv.shift\n\t\t\t\t::Thread.current[:tm_call] = argv.shift\n\t\t\t\t::Thread.current[:tm_time] = Time.now\n\t\t\t\t# Calling spawn without a block means we cannot force a database\n\t\t\t\t# connection release when the thread completes, so doing so can\n\t\t\t\t# potentially use up all database resources and starve all subsequent\n\t\t\t\t# threads that make use of the database. 
Log a warning so we can track\n\t\t\t\t# down this kind of usage.\n\t\t\t\tdlog(\"Thread spawned without a block!\")\n\t\t\t\tdlog(\"Call stack: \\n#{::Thread.current[:tm_call].join(\"\\n\")}\")\n\t\t\tend\n\t\tend\n\n\t\tself << t\n\t\tt\n\tend", "def thread=(_arg0); end", "def hsdq_start_thread(ignition)\n t = Thread.new(&ignition)\n p \"New thread: #{t}\"\n hsdq_threads_add t\n end", "def thread?\n false\n end", "def create_worker_thread\n wrkr = ThreadPoolWorker.new(@queue, self)\n Thread.new(wrkr, self) do |worker, parent|\n Thread.current.abort_on_exception = false\n worker.run\n parent.on_worker_exit(worker)\n end\n return wrkr\n end", "def spawn_threads!\n @threads = []\n num_worker_threads.times do |thread_num|\n log(:debug) { \"Spawning background worker thread #{thread_num}.\" }\n\n @threads << Thread.new do\n Thread.current[:influxdb] = object_id\n\n until @should_stop\n check_background_queue(thread_num)\n sleep rand(sleep_interval)\n end\n\n log(:debug) { \"Exit background worker thread #{thread_num}.\" }\n end\n end\n end", "def spawn_watch_thread\n @watch_thread = Thread.new do\n while true\n # If there are idle threads and we're above minimum\n if @queue.num_waiting > 0 && @worker_threads_count.value > @min_size # documented\n @killscore += THREADS_IDLE_SCORE * @queue.num_waiting\n \n # If there are no threads idle and we have room for more\n elsif(@queue.num_waiting == 0 && @worker_threads_count.value < @max_size) # documented\n @killscore -= THREADS_BUSY_SCORE * @queue.length\n \n else\n # Decay\n if @killscore != 0 # documented\n @killscore *= 0.9\n end\n if @killscore.abs < 1\n @killscore = 0\n end\n end\n if @killscore.abs >= @killthreshold\n @killscore > 0 ? kill_thread : spawn_thread\n @killscore = 0\n end\n Threadz.dputs \"killscore: #{@killscore}. waiting: #{@queue.num_waiting}. threads length: #{@worker_threads_count.value}. min/max: [#{@min_size}, #{@max_size}]\"\n sleep 0.1\n end\n end\n end", "def spawn\n raise \"not yet implemented\"\n end", "def fork_worker(after_requests = T.unsafe(nil)); end", "def allow_concurrency; end", "def allow_concurrency; end", "def start_new_thread(username, params = {})\n $LOG.i \"running \" + __method__.to_s\n @client.post '/mc/v1/threads/' + username, params\n end", "def initialize(_size, _thr_group = nil)\n @kill_worker_on_exception = false\n @size = 0\n @pool_mtx = Mutex.new\n @pool_cv = ConditionVariable.new\n @pool = []\n @dummy_grp = ThreadGroup.new\n @busy_grp = _thr_group.nil? ? 
ThreadGroup.new : _thr_group\n @create_on_spawn = false\n _size = 1 if _size < 1\n _size.times { create_worker }\n end", "def allow_concurrency=(_arg0); end", "def allow_concurrency=(_arg0); end", "def create_thread(list, action)\n Thread.new { action.call(list) }\n end", "def new_controller_thread # :nodoc:\n Thread.new {\n t2 = Thread.current\n t2.abort_on_exception = true\n yield\n }\n end", "def spawn_thread\n Thread.new do\n while true\n x = @queue.shift\n if x == Directive::SUICIDE_PILL\n \t@worker_threads_count.decrement\n \tThread.current.terminate\n end\n Thread.pass\n begin\n x.job.call(x)\n rescue StandardError => e\n $stderr.puts \"Threadz: Error in thread, but restarting with next job: #{e.inspect}\\n#{e.backtrace.join(\"\\n\")}\"\n end\n end\n end\n @worker_threads_count.increment\n end", "def initialize\n RJR::ThreadPool.num_threads ||= 20\n RJR::ThreadPool.timeout ||= 10\n @num_threads = RJR::ThreadPool.num_threads\n @timeout = RJR::ThreadPool.timeout\n @worker_threads = []\n\n @work_queue = Queue.new\n @running_queue = Queue.new\n\n @thread_lock = Mutex.new\n @terminate = true\n\n ObjectSpace.define_finalizer(self, self.class.finalize(self))\n end", "def _active_start!\n _log { \"\" }\n @mutex.synchronize do\n return self if @running || @thread || @stopped\n @stopped = false\n @thread = Thread.new do \n _log { \"Thread.new\" }\n @running = true\n Active.active_facades << self\n while @running && ! @stopped\n begin\n _active_dequeue.invoke! if @running && ! @stopped\n rescue Stop => exc\n _log { \"stopping via #{exc.class}\" }\n end\n end\n Active.active_facades.delete(self)\n _log { \"stopped\" }\n self\n end\n _log { \"@thread=@T#{@thread.object_id}\" }\n @thread\n end\n self\n end", "def thread(&block)\n warn 'A Block is Needed' unless block_given?\n Java::JavaLang::Thread.new(&block).start\n end", "def thread; end", "def thread; end", "def thread; end", "def initialize(threaded = true)\n super(threaded)\n @host = nil\n @port = nil\n end", "def use_threads?\n !defined?(VCR)\n end", "def wait_to_spawn_thread\n # Never spawn more than the specified maximum number of threads.\n until Thread.list.select {|thread| thread.status == \"run\"}.count <\n (1 + @options[:max_threads]) do\n # Wait 5 milliseconds before trying again.\n sleep 0.005\n end\n end", "def enable_call_threading\n @actor.enable_call_threading\n nil\n end", "def initialize(num_threads, args = {})\n @work_queue = Queue.new\n @running_queue = Queue.new\n\n @num_threads = num_threads\n @pool_lock = Mutex.new\n @worker_threads = []\n\n @timeout = args[:timeout]\n\n ObjectSpace.define_finalizer(self, self.class.finalize(self))\n end", "def initialize_threading\n @worker_pool = nil\n \n \n #\n # Define the entity thread, with its internal fibers\n # \n @stopped = true\n ent = self\n Thread.abort_on_exception = true\n @entity_thread = Thread.new { \n Thread.stop\n @worker_pool = FiberWorkerPool.new(ent)\n @event_fiber = Fiber.new {\n run_event_loop\n }\n \n @event_fiber.resume \n }\n\n @event_queue = Queue.new\n end", "def mythread(no)\n\ttid = Thread.new do\n\t\tfor i in 1..5\n\t\t\tprintf\"thread %d (%d)\\n\",no,i\n\t\t\tsleep 1\n\t\tend\n\tend\n\treturn tid\nend", "def background!(thread_name = nil)\n Thread.new do\n java.lang.Thread.currentThread.setName(thread_name) if thread_name\n begin\n @zk.reopen\n run\n @logger.info 'Main thread running in background'\n rescue => e\n @logger.fatal 'Unrecoverable worker exception: ', e\n @thread_status = :stopped\n end\n end\n end", "def spawn_monitor\n\t\t::Thread.new 
do\n\t\t\tbegin\n\n\t\t\t::Thread.current[:tm_name] = \"Thread Monitor\"\n\t\t\t::Thread.current[:tm_crit] = true\n\n\t\t\twhile true\n\t\t\t\t::IO.select(nil, nil, nil, 1.0)\n\t\t\t\tself.each_index do |i|\n\t\t\t\t\tstate = self[i].alive? rescue false\n\t\t\t\t\tself[i] = nil if not state\n\t\t\t\tend\n\t\t\t\tself.delete(nil)\n\t\t\tend\n\n\t\t\trescue ::Exception => e\n\t\t\t\telog(\"thread monitor: #{e} #{e.backtrace} source:#{self[:tm_call].inspect}\")\n\t\t\tend\n\t\tend\n\tend", "def blocking_thread\n worker_thread\n end", "def initialize(thread_name = ::RightScale::AgentConfig.default_thread_name, &continuation)\n super(&continuation)\n @active = false\n @thread = nil\n @thread_name = thread_name\n @pid = nil\n @mutex = Mutex.new\n @queue = Queue.new\n @sequence_finished = ConditionVariable.new\n end", "def initialize(name, group='Misc')\n @name = name\n @group = group\n @action_thread = nil\n @worker_threads = []\n super()\n end", "def running?; !!@thread end", "def _start_new_worker(worker)\n Thread.new do\n worker.work\n end\n end", "def alive?; @locker.synchronize { !! @threads }; end", "def mon_initialize\n if defined?(@mon_data) && @mon_data_owner_object_id == self.object_id\n raise ThreadError, \"already initialized\"\n end\n @mon_data = ::Thread::MonitorCore.new\n @mon_data_owner_object_id = self.object_id\n end", "def inherit( obj )\n case obj\n when Hash\n Thread.current[NAME] = obj.dup\n when Thread\n return if Thread.current == obj\n Thread.exclusive {\n Thread.current[NAME] = obj[NAME].dup if obj[NAME]\n }\n end\n\n self\n end", "def inherit( obj )\n case obj\n when Array\n Thread.current[NAME] = obj.dup\n when Thread\n return if Thread.current == obj\n Thread.exclusive {\n Thread.current[NAME] = obj[NAME].dup if obj[NAME]\n }\n end\n\n self\n end", "def in_background(&block)\n @threads ||= []\n thread = Thread.new(&block)\n thread.abort_on_exception = true\n @threads << thread\n thread\n end", "def initialize\r\n @threads = []\r\n @cordinators = []\r\n end", "def call(event, network, *args)\n if event == :register\n super\n else\n thread = Thread.new { super }\n threadgroups[network].add(thread)\n\n return thread\n end\n ensure\n if [:unregister, :disconnect].include?(event)\n thread.join\n threadgroups[network].list.each(&:join)\n end\n\n if event == :disconnect\n threadgroups.delete(network)\n end\n end", "def nakayoshi_fork(enabled = T.unsafe(nil)); end", "def threads(min, max); end", "def launch_threads num_threads, &block\n num_threads.times.map {Thread.new(block)}\n end", "def start_interrupter_thread()\r\n interrupter_tracepoint_init() if @auto_mode == :full\r\n @interrupter_thread = Thread.new() {interrupter_loop()}\r\n @interrupter_thread.priority = 1\r\n end", "def thread\n @thread ||= Thread.new(sys) do |sys|\n eval(code)\n end\n end", "def apply_threading(enum)\n if threads > 1\n enum.in_threads(threads)\n else\n enum\n end\n end", "def apply_threading(enum)\n if threads > 1\n enum.in_threads(threads)\n else\n enum\n end\n end", "def run!\n @thread && @thread.alive? ? @thread : start!\n end", "def launch_manager\n @manager_thread = Thread.new {\n until @terminate\n # sleep needs to occur b4 check workers so\n # workers are guaranteed to be terminated on @terminate\n # !FIXME! 
this enforces a mandatory setting of @timeout which was never intended:\n sleep @timeout\n check_workers\n end\n @pool_lock.synchronize { @manager_thread = nil }\n } unless @manager_thread\n end", "def initialize( workerCount )\n\t\t\t\tKesh::ArgTest::type( \"workerCount\", workerCount, Fixnum )\n\t\t\t\tKesh::ArgTest::intRange( \"workerCount\", workerCount, 1, 99 ) # lots of threads...\n\t\t\t\t\n\t\t\t\t@workers = []\n\t\t\t\t@jobs = []\n\t\t\t\t@status = :worker_status_idle\n\t\t\t\t\n\t\t\t\t[0..workerCount].each do |i|\n\t\t\t\t\t@workers[ i ] = AsyncWorkerThread.new( self )\n\t\t\t\tend\n\t\t\tend", "def initialize(num_threads)\n @thread_pool = Concurrent::FixedThreadPool.new(num_threads)\n @promises = []\n end", "def thread(*methods, enabled: true)\n @yuuki_methods ||= {}\n methods.each do |method|\n @yuuki_methods[method] ||= {}\n @yuuki_methods[method][:thread] = enabled\n end\n end", "def start_threads(ships_tsw, safety_message_tsw)\n @worker_thread_definitions.each do |name, d|\n puts \"Starting thread: #{name}\"\n @worker_threads[name] = d.call(ships_tsw, safety_message_tsw)\n end\n end", "def thread_safe?\n false\n end", "def initialize(obj)\n @thread = Thread.new {\n loop do\n begin\n break if obj.shutdown\n obj.reap!\n sleep(obj.reap_timeout || 600)\n rescue Exception => e\n HotTub.logger.error \"[HotTub] Reaper for #{obj.class.name} terminated with exception: #{e.message}\" if HotTub.logger\n HotTub.logger.error e.backtrace.map {|line| \" #{line}\"} if HotTub.logger\n break\n end\n end\n }\n @thread[:name] = \"HotTub::Reaper\"\n @thread.abort_on_exception = true\n @thread\n end", "def start_worker\n raise NotImplementedError, \"#{self.class} cannot respond to: #{__method__}\"\n end", "def run(background = T.unsafe(nil)); end", "def defer(&block)\n index = wait_available_slot\n @threads[index].kill if @threads[index].respond_to?(:kill)\n @threads[index] = Thread.new(&block)\n end", "def initialize\n @thread = Thread.new { loop { process_queue } }\n end", "def start_thread\n @thread_status.running = true\n @thread = Thread.new do\n @sandbox.execute\n rescue KrillError, KrillSyntaxError, ProtocolError => e\n notify(@sandbox.job)\n raise e\n ensure\n @mutex.synchronize { @thread_status.running = false }\n ActiveRecord::Base.clear_active_connections!\n puts \"#{@sandbox.job.id}: Closing ActiveRecord connection\"\n end\n end", "def engage\n threads = []\n #threads << LisaToolbox.run_in_new_thread(:engage_by_elite_tweets) {engage_by_elite_tweets}\n threads << LisaToolbox.run_in_new_thread(:engage_by_search) {engage_by_search}\n #threads << LisaToolbox.run_in_new_thread(:engage_by_realtime) {engage_by_realtime}\n threads.each { |thread| thread.join }\nend", "def unsafely\n tap { Threaded.safety_options = false }\n end", "def set_thread_name(name); end", "def update_native_thread_name\n thread = JRuby.reference(Thread.current)\n set_thread_name = Proc.new do |prefix, suffix|\n self.class.with_global_lock do\n count = self.class.system_registered_workers.size\n thread.native_thread.name = \"#{prefix}##{count}#{suffix}\"\n end\n end\n if ! name = thread.native_thread.name\n # \"#{THREAD_ID}##{count}\" :\n set_thread_name.call(WORKER_THREAD_ID, nil)\n elsif ! 
name.index(WORKER_THREAD_ID)\n # \"#{name}(#{THREAD_ID}##{count})\" :\n set_thread_name.call(\"#{name} (#{WORKER_THREAD_ID}\", ')')\n end\n end", "def start_trace\n @trace_thread ||= Thread.new do\n begin\n trace_loop\n rescue => ex\n log \"trace thread: #{ex}\"\n end\n end\n end", "def start_thread(link)\n if Thread.list.count < 30\n t = Thread.new do\n retrieve_motionbook(link, @agent)\n end\n @threads << t\n return t\n else\n return false\n end\nend", "def initialize num=1\n @thread_count=0\n @threads=[]\n # Other option is to use ThreadGroup.\n @queue = Queue.new\n @mutex = Mutex.new\n # Private mutex.\n self.increment(num)\n end", "def initialize\n @to_run = []\n @paused = false\n @alerter, @waker = IO.pipe\n @thread = Thread.new{ run! }\n end" ]
[ "0.6906716", "0.66781485", "0.64136916", "0.63996524", "0.63996524", "0.63996524", "0.61905897", "0.6128272", "0.6047312", "0.6047312", "0.59888667", "0.5952064", "0.5927961", "0.5898756", "0.5893623", "0.5786867", "0.57800984", "0.5765258", "0.5756319", "0.57507795", "0.5744821", "0.5738692", "0.57242167", "0.5695215", "0.5693279", "0.56735003", "0.5641229", "0.56341267", "0.55998313", "0.55948454", "0.5582645", "0.5576643", "0.5530469", "0.5503919", "0.5448372", "0.5443074", "0.54393804", "0.54393804", "0.54365015", "0.54324824", "0.54277223", "0.54277223", "0.54075533", "0.5406246", "0.53873646", "0.5380814", "0.538076", "0.53675026", "0.5364792", "0.5364792", "0.5364792", "0.5363034", "0.536144", "0.53597766", "0.5351394", "0.5344648", "0.53273237", "0.5327004", "0.5318422", "0.53053874", "0.5282049", "0.5276854", "0.52695364", "0.52586526", "0.5252628", "0.5208443", "0.5200981", "0.51820505", "0.5163619", "0.5161024", "0.51549864", "0.51523185", "0.51056", "0.50959283", "0.50754696", "0.5068061", "0.5062634", "0.5061188", "0.5061188", "0.50418884", "0.5020289", "0.5004259", "0.49973398", "0.49901578", "0.49820495", "0.49805364", "0.4977246", "0.4959194", "0.49589774", "0.49533322", "0.49500227", "0.49461722", "0.49456", "0.4943442", "0.49428692", "0.49423584", "0.49355257", "0.49329", "0.4913297", "0.4905852" ]
0.6995929
0
def add puts "holaaa" hol = params[:name] puts hol
def tag
  # ban = true
  puts "entro"
  port_str = "/dev/ttyACM0" # may be different for you
  baud_rate = 115200
  data_bits = 8
  stop_bits = 1
  parity = SerialPort::NONE
  sp = SerialPort.new(port_str, baud_rate, data_bits, stop_bits, parity)
  # while ban do
  while (i = sp.gets.chomp)
    i.slice!("Tag is not NDEF formatted.")
    tagUID = i
    puts "el while"
    puts tagUID
    if tagUID.empty?
      ban = true
    else
      $global = 0
      session[:prueba] = tagUID
      $global = tagUID
      return tagUID
    end
  end
  # end
  # sp.close
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(params)\n post 'add', params\n end", "def param_name_add(name, id, default)\n param = Hash.new\n param['name'] = name\n param['id'] = id\n param['value_db'] = 0\n param['value_ar'] = 0\n param['default'] = default\n @params << param\n end", "def param_name_add(name, id, default)\n found = false\n @params.each do |p|\n if p['name'] == name\n found = true\n end\n end\n\n if found == false\n param = Hash.new\n param['name'] = name\n param['id'] = id\n param['value_db'] = 0\n param['value_ar'] = 0\n param['default'] = default\n @params << param\n end\n end", "def add_name\n\t\tputs \"What\\'s your name?\"\n\t\t@name = gets.chomp.capitalize \n\tend", "def add(params = {})\n perform_request 'Add', params\n end", "def name\n params['name']\n end", "def add\n\tend", "def add \n end", "def add\n\nend", "def add\n end", "def add\n end", "def addPlace\n params[:new_place]\n end", "def addition_params\n params.require(:addition).permit(:name, :tags)\n end", "def reqadd(params = nil)\n if @name_index\n @conf.insert(@name_index + @conf.length, \" \" + \"reqadd \" + params.to_s + \"\\n\")\n else\n puts \"no #{@proxy_type} name assigned\"\n return false\n end\n end", "def add_params\n params.require(:add).permit(:title, :body, :active)\n end", "def name\n @name ||= params['name']\n end", "def add(params = {})\n @hydra.queue(request('add', params))\n end", "def name_params\n params.require(:name).permit(:name)\n end", "def rspadd(params = nil)\n if @name_index\n @conf.insert(@name_index + @conf.length, \" \" + \"rspadd \" + params.to_s + \"\\n\")\n else\n puts \"no #{@proxy_type} name assigned\"\n return false\n end\n end", "def add_item\n #params[item_id]\n end", "def add(word)\n end", "def add\n @input1 = params[:input1]\n @input2 = params[:input2]\n @input1 = @input1.to_i\n @input2 = @input2.to_i\n @add = @input1 + @input2\n end", "def add(*args); end", "def add_user(name)\n\t@users << {:name => name}\n end", "def param( name, *args, &block )\n\t\t\tself.log.debug \"New param %p\" % [ name ]\n\t\t\tself.log.debug \" adding parameter %p to %p\" % [ name, self.paramvalidator ]\n\t\t\tself.paramvalidator.add( name, *args, &block )\n\t\tend", "def add_word(word)\n \n end", "def add_ogone_parameter name, value\n\n # transliterate if available\n value = ActiveSupport::Inflector.transliterate(\"#{value}\")\n\n @form.add_input(name, value)\n if !value.empty?\n @hash.add_parameter(name.to_s, value)\n end\n end", "def add\n\t\t$employee_info[get_employee_id] = {first_name: get_employee_first_name, last_name: get_employee_last_name, salary: get_employee_salary, tax_rate: get_employee_tax_rate}\n\t\tputs \"\"\n\t\tputs \"\"\n\t\tputs \"Employee successfully added!\"\n\tend", "def introduce_parameter(name, age)\n puts \"My name is #{name}\"\n puts \"My age is \" + age.to_s #{name}\"\nend", "def additive_params\n params.require(:additive).permit(:name)\n end", "def add(*names); end", "def add_item(list, name, quantity = 1) \r\n# input: item name and optional quantity\r\n# steps: \r\n# create add method with name and optional quantity arguments\r\n# add name and quantity to hash\r\n list[name] = quantity\r\n# output: print \"your item has been added to the hash\"\r\n return list\r\nend", "def name(namei)\n return \"hola \" + namei + \" Welcome\"#namei->name input\nend", "def add(product_name,quantity)\n $products[product_name] = quantity\n $products\nend", "def add list\n list_action list, \"add\"\n end", "def add(name, value)\n form_data << [name.to_s, value]\n end", "def add_post_parameter(name, 
value)\n\t\t\t@post = appendParameter(@post, name, value)\n\t\tend", "def add_post_parameter(name, value)\n\t\t\t@post = appendParameter(@post, name, value)\n\t\tend", "def name_params\n params.require(:name).permit(:title, :subtitle)\n end", "def sl_params\n params.permit(:name)\n end", "def add(data)\n params = self.params\n data['add']['parameters'].each { |k,v|\n params[k] = v\n }\n write(params)\n data['add']['parameters']\n end", "def name_params\n params.require(:name).permit(:name, :hiragana, :katakana)\n end", "def add_parameter one\n # <leader>rap[name of param] from here\n # FIXME: make this work with g:ruby_refactoring_sans_superfluous_syntax and\n # ditch RAddParameterNB (would cause <leader>rap to respond immediately)\nend", "def index\n @name = params[:name]\n # logger.debug \"debug message\"\n # logger.warn \"warning message\"`\n end", "def req_name\n\t\tputs \"What is your name?\"\n\t\t@name = gets.chomp\n\tend", "def add(*); end", "def add_item(list,name,quantity=1)\n list[name]=quantity\n return list\nend", "def create\n # rails with indifferent access, can be accessed with string or symbol\n #@name = params[\"full_name\"]\n @name = params[:full_name]\n end", "def add(value)\n end", "def add(params)\n headers = {\n 'Cookie' => @context[:koha_rest_api_cookie],\n 'Content-Type' => 'application/json'\n }\n\n http = Net::HTTP.new(\"xkoha\", 8081)\n uri = URI(intranet(:koha_rest_api) + \"holds\")\n res = http.post(uri, params.to_json, headers)\n expect(res.code).to eq(\"201\"), \"got unexpected #{res.code} when adding hold.\\nResponse body: #{res.body}\"\n res.body\n end", "def methodWithParams nombre,apellido\n puts \"Mi nombre es #{nombre} #{apellido}\" \nend", "def add_item(name, quantity, list)\n list[name] = quantity\n p list\n return list\nend", "def add(param_name, value)\n if value.class == Hash\n if self.params.has_key?(param_name)\n if self.params[param_name].class == Hash\n self.params[param_name].merge!(value)\n elsif self.params.has_key?(param_name)\n if self.params[param_name].class != value.class\n raise ArgumentError, \"#{param_name} already exists, and is of different type!\"\n end\n end\n else\n self.params[param_name] = value\n end\n if ! self.groups.include?(param_name)\n self.groups.push(param_name)\n end\n else\n self.params[param_name] = value\n end\n end", "def create\n puts params\n end", "def add_key_input(name); end", "def razdel_params\n params.require(:razdel).permit(:name)\n end", "def razdel_params\n params.require(:razdel).permit(:name)\n end", "def add_to_user\n @user = current_user;\n @user.diet_type = DietType.find_by_name(diet_type_params[:name])\n \n respond_to do |format|\n flash[:success] = 'diet type was successfully add.'\n format.html { redirect_to action:\"index\" }\n end\n end", "def list_add(list, item_name, quantity=1)\r\n list[item_name] = quantity\r\n p list\r\nend", "def edit_name\n # p \"-\"* 50\n # p \"edit name\"\n # @name_exercise_id = params[:format]\n end", "def add (arg1, arg2)\n puts arg1 + arg2\nend", "def my_name_params\n params.require(:my_name).permit(:name, :color)\n end", "def add_member(lineup)\n puts \"you have chosen to add a spice girl. 
Enter the name of the girl: \"\n lineup.list_names2\n pick_newby = gets.chomp\n lineup.name_check=(pick_newby)\n lineup.add_girl(pick_newby)\n puts \"your new lineup is :\"\n lineup.list_names\nend", "def add\n\t\tputs \"\\nEnter the name of the item you would like to add:\"\n\t\tnew_item = gets.chomp\n\t\tputs \"\\nHow many #{new_item} would you like?\"\n\t\tnew_qty = gets.chomp\n\t\t@grocery_list.add(new_item, new_qty)\n\t\tputs \"\\nAdding #{new_item} to list...\"\n\t\tputs \"#{new_qty} #{new_item} were added to your Grocery List!\"\n\tend", "def add (p)\n @people << p \n end", "def name(name)\n @name = name\nend", "def set_add\n @add = Add.friendly.find(params[:id])\n end", "def add(name, value = nil)\n symbols << [name.to_s, (Integer(value) if value)]\n end", "def add_animal(species, age)\n puts \"What species of animal are you bringing in?\"\n @species = gets.chomp.downcase\n\n puts \"How old is the animal?\"\n @age = gets.chomp.to_s\n end", "def addinfo\n\nend", "def query_by_name1(name1)\r\n @PARAM_HASH[\"name1\"] = name1\r\n end", "def query_by_name1(name1)\r\n @PARAM_HASH[\"name1\"] = name1\r\n end", "def add(arg1, arg2)\n\targ1 + arg2\nend", "def add(value)\n\tend", "def add_stuff(a_string_param)\n a_string_param += \"rutabaga\"\nend", "def vars_add(name,obj)\n @to_load << {:name => name, :obj => obj} \n end", "def add_word(word)\r\n \r\n end", "def add name\n\t\t\tif name.class == 'String'\n\t\t\t\tpush name\n\t\t\telse\n\t\t\t\tname.each do | n |\n\t\t\t\t\tpush n\n\t\t\t\tend\n\t\t\tend\n\t\tend", "def query_by_name1(name1)\n @PARAM_HASH[\"name1\"] = name1\n end", "def introduce \n puts \"Hi, my name is #{self.name}\"\n end", "def create\n @name = Name.new(name_params)\n if @name.save\n flash[:notice] = \"Saved successfully.\"\n else\n flash[:error] = \"Not saved.\"\n redirect_to :back\n end\n end", "def list_add(list, item_name, quantity=1)\n list[item_name] = quantity\n p list\nend", "def arl_params\n params.require(:arl).permit(:name)\n end", "def ahmed_params\n params.require(:ahmed).permit(:name)\n end", "def introduce()\r\n\t\tprint \"I am #{@name}, I am #{@age} years old\"\r\n\r\nend", "def user(name:\"\", email:\"\")\n puts \"New user #{name}, email: #{email}\"\nend", "def tk_add\n \"add planet #{id} - 10 #{x} #{y} 0\\n\" +\n \"param #{id} 0 #{size}\\n\" +\n \"param #{id} 1 #{-size}\\n\" +\n \"param #{id} 2 #{color}\\n\" +\n \"param #{id} 3 #{name}\"\n end", "def param(name, \n example: '', \n type: :string, \n required: false,\n description: '')\n @params << Apipony::Parameter.new(name, example, type, required, description)\n end", "def add_song_by_name(song_title) #enter string title\n song = Song.new(song_title) # initialize new song by title\n self.add_song(song)\t# call add_song method to\n\tend", "def add(*args)\n params = { name: nil, group: nil, length: 128, value: nil, force: false }\n case args.length\n when 1\n # add('foo')\n params[:name] = args.first\n when 2\n if args.all? 
{ |a| a.is_a?(String) }\n # add('my_app', 'foo')\n params[:group], params[:name] = args\n elsif args[1].is_a?(Hash)\n # add('my_app', value: 'something')\n # add('foo', length: 50)\n params[:name] = args.first\n params.merge!(args[1])\n end\n when 3\n # add('my_app', 'foo', value: 'something')\n # add('my_app', 'foo', length: 50)\n params[:group], params[:name] = args[0], args[1]\n params.merge!(args[2])\n else\n raise ArgumentError, \"wrong number of arguments (given #{args.length}, expected 1-3)\"\n end\n\n add_from_params(params)\n end", "def create\n a_hash = {user_id: current_user.id}\n name_params.merge! a_hash\n @name = Name.new(name_params)\n\n respond_to do |format|\n if @name.save\n format.html { redirect_to @name, notice: 'Name was successfully created.' }\n format.json { render action: 'show', status: :created, location: @name }\n else\n format.html { render action: 'new' }\n format.json { render json: @name.errors, status: :unprocessable_entity }\n end\n end\n end", "def add\n\t\tquest = Quest.new\n\t\tquest.name = params[:name]\n\t\tquest.address = params[:address]\n\t\tquest.hint = params[:hint]\n\t\tquest.brief = params[:brief]\n\t\tquest.latitude = params[:latitude] || 0\n\t\tquest.latitude = quest.latitude.to_d\n\t\tquest.longitude = params[:longitude] || 0\n\t\tquest.longitude = quest.longitude.to_d\n\t\tquest.difficulty = params[:difficulty] || 0\n\t\tquest.difficulty = quest.difficulty.to_i\t\n\t\tquest.place_name = params[:place_name]\n\t\tquest.phone = params[:phone]\n\t\tquest.fun_facts = params[:fun_facts]\n\t\t\n\t\tif quest.save\n\t\t\trender json: quest, root: true\n\t\telse\n\t\t\trender json: {error: \"could not add quest\"}\n\t\tend\n\tend", "def name (name)\n @name = name\n end", "def add_parameter(name, value = 0, description = \"\")\n\t\t\tif (@parameters.key? name) or (@species.key? name)\n\t\t\t\traise \"duplicate parameter #{name}\"\n\t\t\tend\n\n\t\t\t@parameters[name] = Parameter.new(name, value, description)\n\n\t\t\tself\n\t\tend", "def add(a,b)\n puts \"Adding #{a} + #{b}\"\n a + b\nend", "def adduser(journal)\n\tputs \"What is the username that you want to add?\"\n\tuser_name = gets.chomp.capitalize\n\tjournal.execute(\"INSERT INTO users (name) VALUES (?)\", [user_name])\nend", "def hello1(name)\n 'halo ' + name\nend", "def add(a, b)\r\n a + b \r\n end", "def to_param\n \tname\n end", "def saluda_persona(nombre=\"humano\")\n puts \"Hola #{nombre}\"\nend", "def add_member_name(name)\n\n end" ]
[ "0.7055181", "0.7051024", "0.68499786", "0.67512953", "0.66545373", "0.6561442", "0.6475256", "0.64695305", "0.6437187", "0.6374592", "0.6374592", "0.6358891", "0.63342464", "0.6305712", "0.6287832", "0.6258247", "0.6246468", "0.62252754", "0.6202081", "0.6164553", "0.61550236", "0.6135077", "0.61338", "0.6090193", "0.6085324", "0.60411286", "0.6039778", "0.6026852", "0.60258037", "0.601493", "0.6008461", "0.6003131", "0.59856755", "0.59841156", "0.5960917", "0.5960735", "0.5957781", "0.5957781", "0.5954572", "0.5953053", "0.59449136", "0.59422904", "0.59345055", "0.59130156", "0.58974594", "0.58874106", "0.58846337", "0.5875761", "0.5871693", "0.5856984", "0.5854282", "0.58531517", "0.58473235", "0.58421314", "0.5840713", "0.58393687", "0.58393687", "0.5837493", "0.5837208", "0.5836702", "0.5836183", "0.5833798", "0.5830355", "0.5821979", "0.5820031", "0.58181155", "0.5807805", "0.57984", "0.5797216", "0.57970184", "0.5796242", "0.5796242", "0.5789076", "0.5783994", "0.5779", "0.5778995", "0.577891", "0.5774206", "0.5770881", "0.57678753", "0.5765412", "0.5759806", "0.57582706", "0.5756946", "0.575095", "0.5747689", "0.57439363", "0.57326335", "0.5732557", "0.573114", "0.57302874", "0.57250106", "0.57230884", "0.5713223", "0.5703722", "0.57021475", "0.5698228", "0.56962246", "0.56939787", "0.5693009", "0.569158" ]
0.0
-1
Confirm the correct user, for authorization purposes such as updating their own profile and not any other user's profile information.
def correct_user
  @user = User.find(params[:id])
  redirect_to '/' unless @user == current_user || current_user.isadmin?
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def correct_user\n msg = \"You do not have permission to update another user's information\"\n require_correct_user(@user, msg)\n end", "def correct_user\n set_user\n unless current_user?(@user)\n flash[:danger] = 'This action is not permitted for this account since you are not the owner'\n redirect_to overview_user_path(current_user)\n end\n end", "def correct_user\n unless helpers.current_user?(@user)\n flash[:danger] = \"You don't have permission to do that\"\n redirect_to root_path\n end\n end", "def confirm\n user = User.find(params[:id])\n authorize user\n if user.state != \"active\"\n user.confirm\n user.make_user_a_member\n\n # assume this type of user just activated someone from somewhere else in the app\n flash['notice'] = \"Activation of #{user.name_and_login} complete.\"\n redirect_to(session[:return_to] || root_path)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n unless current_user?(@user)\n flash[:danger] = \"Please don't mess with others' profiles!\"\n # redirect_to root_url\n redirect_to @user\n end\n end", "def correct_user\n @user = User.find(params[:id])\n unless @user == current_user\n flash[:danger] = 'You are not authorized to do that.'\n redirect_to(root_url)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n if current_user != @user\n flash[:danger] = \"You don't have permission for that\"\n redirect_to(root_url) unless current_user?(@user)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n if @user != current_user\n flash[:alert] = \"Action not authorized\"\n redirect_to(root_url)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n unless current_user?(@user)\n flash.now[:danger] = \"You can only change your own profile.\"\n redirect_to signin_url\n end\n end", "def correct_user\n @user = User.find(params[:user_id])\n redirect_to('/unauthorized') unless current_user?(@user)\n end", "def confirm_user\n @user = User.find_by_confirmation_token(params[:user][:confirmation_token])\n if @user.blank?\n render :confirm_user_failed\n return\n end \n # Slim down the hash I send to Devise to avoid Mass-Update-problems:\n params_user = {'name' => params[:user][:name], \n 'password' => params[:user][:password], \n 'password_confirmation' => params[:user][:password_confirmation]\n }\n if @user.update_attributes(params_user) and @user.password_match?\n @user = User.confirm_by_token(@user.confirmation_token)\n set_flash_message :notice, :confirmed \n sign_in_and_redirect(\"user\", @user)\n else\n render :confirm_user_failed\n return\n end # if\n end", "def correct_user\n\t\t\tauthenticate_user!\n\t\t\tunless @user == current_user || current_user.admin?\n\t\t\t\tredirect_to (root_path)\n\t\t\t\tflash[:alert]\n\t\t\tend\n\t\tend", "def confirm\n if @user = UserConfirmsAccount.new(:token => params[:token]).call\n self.establish_session @user\n redirect_to profile_url, :notice => \"Thanks for confirming #{@user.email}\"\n else\n redirect_to profile_url, :notice => \"There was a problem confirming - try re-sending the email?\"\n end\n end", "def correct_user\n @user = User.find(params[:id])\n unless current_user?(@user)\n flash[:danger] = \n \"You do not have permission to access #{@user.name}'s account.\"\n redirect_to(root_url)\n end\n end", "def confirm\n if current_visitor && current_visitor.has_role?('admin', 'manager')\n user = User.find(params[:id]) unless params[:id].blank?\n if !params[:id].blank? 
&& user && user.state != \"active\"\n user.confirm!\n user.make_user_a_member\n # assume this type of user just activated someone from somewhere else in the app\n flash[:notice] = \"Activation of #{user.name_and_login} complete.\"\n redirect_to(session[:return_to] || root_path)\n end\n else\n flash[:notice] = \"Please login as an administrator.\"\n redirect_to(root_path)\n end\n end", "def confirm_matching\n @user = User.find(params[:id])\n redirect_to root_path unless current_user? @user\n end", "def correct_user\n @user = User.find(params[:id])\n unless @user == current_user\n flash.now[:alert] = 'You cannot access this area because this is not your profile'\n redirect_to(root_url)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n unless current_user?(@user)\n flash[:danger] = \"Yikes. Sorry, but it doesn't look you have permission to do that 😬\"\n redirect_back(fallback_location: root_url)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to root_url, notice: \"You do not have permission to view or edit this information because it isn't yours.\" unless current_user?(@user)\n end", "def confirm\n \t\tuser = User.find_by_uid params[:uid]\n \t\tuser.confirm_email params[:confirmation_code]\n \t\tif not user.confirmed?\n \t\tflash[:error] = \"You're link doesn't match what we have on record.\"\n \t\tredirect_to signup_path\n \t\telse\n \t\tsession[:user] = user\n \t\tflash[:success] = \"We will update you at #{user.email} with new information as it because available\"\n \t\tredirect_to dashboard_path\n \t\tend \n \tend", "def correct_user\n unless current_user?(@user)\n render json: { errors: [\"User not authorized to modify an account that doesn't belong to them\"] }, status: :unauthorized\n end\n end", "def correct_user\n user = User.find(params[:id])\n unless current_user?(user) \n flash[:danger] = \"Uncorrect user.\"\n redirect_to(root_url) \n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to root_path, alert: \"You do not have access to that page\" unless current_user == @user\n end", "def correct_user\n @user = User.find(params[:user_id])\n unless current_user?(@user)\n flash[:danger] = \"You don't have permission for that action.\"\n redirect_to(root_url)\n end\n end", "def correct_user_for_profile\n @user = User.find(params[:id])\n unless current_user?(@user)\n flash[:danger] = \"Log in as correct user.\"\n redirect_to(root_url)\n end \n end", "def correct_user\n\t\t\t@user = User.find(params[:id])\n\t\t\tif current_user != @user\n\t\t\t\tredirect_back(fallback_location: root_path)\n\t\t\tend\n\t\tend", "def correct_user\n\t @user = User.find(params[:id])\n\t unless current_user?(@user)\n\t flash[:danger] = \"You don't have rights\"\n\t\t\tredirect_back_or(root_url)\n\t end\n\tend", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n flash[:danger] = \"Admin Access Only.\"\n end", "def correct_user\n user_id = params[:user_id] || params[:id] || session[:user_id]\n @user = User.find_by(id: user_id)\n unless @user.nil?\n unless current_user?(@user) || current_user.administrator?\n flash[:danger] = \"Only the account owner or an adminstrator to do that.\"\n redirect_to(root_path)\n end\n else\n nonexistent_user_error\n end\n end", "def correct_user\n @user = User.find(params[:id])\n if !current_user?(@user)\n flash[:danger] = \"Sorry, you're aren't allowed to access that.\"\n redirect_to(\"/#flash\") \n end\n end", "def correct_user\n @user = 
HoacUser.find(params[:id])\n redirect_to(edit_hoac_user_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id]) if User.exists?(params[:id])\n redirect_to(edit_user_path(current_user)) unless current_user == @user\n end", "def appctrl_confirm_user\n redirect_to( signin_path() ) unless @current_user\n end", "def confirm!\n # add if else here in case user already existed and is updating/changing data (email etc),\n # shouldn't add_profile again if profile already exists. can determine with a user db 'nil' value...\n unless self.profile\n add_profile\n end\n super\n end", "def correct_user\n unless Presenter.find_by(user_id: current_user) == find_presenter\n if current_user.admin?\n redirect_to edit_presenter_profile_path\n else\n flash[:danger] = \"Unauthorized Access\"\n redirect_to root_url\n end \n end\n end", "def correct_user\n unless @user == current_user\n redirect_to user_notes_path(current_user)\n end\n end", "def correct_user\n\t\tunless current_user == @univers.user\n\t\t\tflash[:danger] = \"You have no power there\"\n\t\t\tredirect_to universes_path\n end\n end", "def correct_user\n if !is_correct_user\n redirect_to incorrect_user_path_for\n end\n end", "def correct_user\n \n redirect_to(login_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find_by(id: params[:id])\n if(@user.nil?)\n flash[:danger] = \"Error. User does not exist\"\n redirect_to users_url\n end\n \n #Admins have access to everything\n if(current_user.admin?)\n return true\n end\n #If a user is trying to view another users page they are redirected to the homepage\n if( !(@user == current_user))\n flash[:danger] = \"You don't have permission to access this page\"\n redirect_to root_url\n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_path) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n redirect_to(root_url) unless @user == current_user\n end", "def verify\n\t\tif user = check_admin\n\t\t\ttarget_user = User.find_by(id: params[:user_id])\n\t\t\ttarget_user.toggle!(:verified)\n\t\t\tredirect_back fallback_location: '/home'\n\t\tend\n\tend", "def confirm\n confirm_user(params[:confirmation_code])\n if @user and @user.confirmed?\n @current_user_session = FrontendUserSession.new\n render :template => 'frontend/users/confirmed'\n else\n render :template => 'frontend/users/confirm_now'\n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(user_root_path,:notice => 'You cannot access this page') unless 
current_user == @user\n end", "def correct_user\n @question = Question.find(params[:id])\n redirect_to(root_url) unless current_user == @question.user\n end", "def correct_user\n\t\t\tif !current_user?(@user) && !current_user.orgadmin?\n\t\t\t\tredirect_to(users_path)\n\t\t\tend\n\t\tend", "def require_same_user\n if current_user != @user\n flash[:error] = \"You do not have permisions to edit a user other than yourself\"\n redirect_to root_path\n end \n end", "def correct_user\n @user = User.find(params[:id])\n # redirect_to root_path, notice: \"You do not have permission.\" unless @user == \"Admin\" || @user == \"Member\"\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:id])\n unless current_user?(@user) || (logged_in? && current_user.admin?)\n if logged_in?\n flash[:info] = 'You don\\'t have permission.'\n redirect_to(current_user)\n else \n flash[:danger] = 'Please log in.' \n redirect_to(login_url) \n end\n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to root_path unless @user == current_user\n end", "def correct_user\n @user = User.find(params[:user_id])\n redirect_to(current_user) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:user_id])\n redirect_to(current_user) unless current_user?(@user)\n end", "def confirm\n response = CoachClient::Request.put(url, username: @user2.username,\n password: @user2.password,\n payload: payload,\n content_type: :xml)\n unless response.code == 200 || response.code == 201\n fail CoachClient::NotConfirmed.new(self), 'Could not confirm partnership'\n end\n set_user_confirmed(response.to_h)\n self\n end", "def correct_user\n @user = User.find(params[:id])\n if !current_user?(@user)\n message = \"currently logged in as #{current_user.name}. Not you? 
\"\n message += \"#{view_context.link_to('Log out.', log_out)}\".html_safe\n flash[:warning] = message\n redirect_to(root_url)\n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user_help?(@user)\n end", "def correct_user\n redirect_to(root_url) unless current_user?(@user)\n end", "def verify_correct_user\n @user = User.find(params[:id])\n # current_user is a function defined in sessions_helper\n if not @user == current_user\n flash[:danger] = \"Unauthorized Access.\"\n redirect_to listings_path\n end\n end", "def confirm\n user = User.find_by_email(params[:email])\n # make sure user do not use the string \"used\" to hack the system\n if user.token != \"used\" && params[:token] == user.token\n user.confirm = true\n user.token = \"used\" #token is only for one time use\n if user.save\n session[:user_id] = user.id\n redirect_to main_path\n end\n else\n render :text => \"You have confirmed before. Or something went wrong\"\n end\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", "def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end", 
"def correct_user\n @user = User.find(params[:id])\n redirect_to(root_url) unless current_user?(@user)\n end" ]
[ "0.7820566", "0.7487571", "0.747985", "0.7421584", "0.7403223", "0.7374018", "0.73545015", "0.73001206", "0.7287012", "0.7282314", "0.7262213", "0.72599703", "0.7259919", "0.7256439", "0.7236024", "0.7179469", "0.7169858", "0.7164624", "0.7161283", "0.7150804", "0.71371025", "0.70840013", "0.706256", "0.7028457", "0.70227003", "0.6996182", "0.69914126", "0.6967446", "0.6964433", "0.69600964", "0.6942265", "0.692678", "0.69199544", "0.6917273", "0.69053", "0.68984425", "0.6880593", "0.68272465", "0.6821665", "0.6804608", "0.6797", "0.6794223", "0.6794223", "0.6794223", "0.6794223", "0.6794223", "0.6794223", "0.6794223", "0.6794223", "0.6793209", "0.6790602", "0.678563", "0.67833304", "0.67785287", "0.67717314", "0.6771596", "0.6770303", "0.67691237", "0.6767707", "0.6767707", "0.6767707", "0.6767707", "0.6767707", "0.6767707", "0.6767707", "0.6767707", "0.6767707", "0.6764689", "0.6762877", "0.67616576", "0.67616576", "0.67608064", "0.6760371", "0.6760202", "0.67569774", "0.67565995", "0.6751689", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173", "0.6739173" ]
0.0
-1
Admin users will have special permissions, such as deleting members or posts.
def admin_user
  redirect_to('/') unless current_user.isadmin?
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_only\n deny_access(\"Necesitas tener privilegios de administrador para entrar.\") unless signed_in_as_admin?\n end", "def custom_permissions\n if current_user.admin?\n can :manage, :all\n end\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\n\n if current_user.admin?\n # Role management\n # don't allow :destroy, :edit, :create\n # - destroy adds a 'delete' button that\n # - could be clicked accidentally\n # - would be very infrequently used (if ever)\n # - implications of edit are unclear for associated actions\n # - create is meaningless without associating actions which happens in code.\n can [:read, :add_user, :remove_user], Role\n end\n\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 'special_group'\n # can [:create], ActiveFedora::Base\n # end\n end", "def admin_actions(user)\n can_act_as_logged_in_user(user)\n can_view_any_profile\n can_view_any_gallery\n can_edit_saved_queries\n can_curate\n can_update_metadata\n can_administer\n end", "def admin_in!\n access_denied! unless current_user.admin?\n end", "def custom_permissions\n if user_groups.include?(\"admin\")\n can :manage, :all\n end\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\nif current_user.admin?\n\t can [:create, :show, :add_user, :remove_user, :index], Role\n\t end\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 'special_group'\n # can [:create], ActiveFedora::Base\n # end\n\n\n\n end", "def must_be_admin!\n access_denied! unless current_admin?\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\n\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 'special_group'\n # can [:create], ActiveFedora::Base\n # end\n end", "def admin\n\t\tcan :manage, :all\n\tend", "def admin_access_required\n access_denied unless admin?\n end", "def admin_access_required\n access_denied unless admin?\n end", "def admin_access_required\n access_denied unless admin?\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\n\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 'special_group'\n # can [:create], ActiveFedora::Base\n # end\n\n if current_user.admin?\n can [:create, :show, :add_user, :remove_user, :index, :edit, :update, :destroy], Role\n end\n\n# if current_user.contentadmin?\n# can [:create, :destroy], GwWork\n# can [:create, :destroy], GwEtd\n# end\n end", "def authorize_admin!\n authorize! 
:manage, :all\n end", "def admin_permissions\n can [:manage], :all\n end", "def admin_permissions\n can [:manage], :all\n end", "def authorize_admin\n redirect_to :login unless current_user.permission.manage_app ||\n current_user.permission.manage_attrs ||\n current_user.permission.manage_achievement_categories ||\n current_user.permission.manage_talent_trees ||\n current_user.permission.manage_talents ||\n current_user.permission.manage_quests ||\n current_user.permission.manage_skills ||\n current_user.permission.manage_achievements ||\n current_user.permission.manage_items ||\n current_user.permission.manage_titles\n end", "def admin_user\n render_forbidden unless current_user.admin?\n end", "def permission_required \n render_403 unless admin? || @user == current_user\n end", "def admin_authorize\n unless admin?\n unauthorized_access\n end\n end", "def ensure_admin!\n authorize! :read, :admin_dashboard\n end", "def ensure_admin!\n authorize! :read, :admin_dashboard\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\n if current_user.admin?\n can [:create, :show, :add_user, :remove_user, :index, :edit, :update, :destroy], Role\n # Admin user can create works of all work types\n can :create, curation_concerns_models\n end\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 'special_group'\n # can [:create], ActiveFedora::Base\n # end\n end", "def admin_only\n deny_access(\"You must be signed in as an admin to access this page.\") unless signed_in_as_admin?\n end", "def require_admin\n grant_access?(\"index\", \"users\")\n #position?('admin')\n end", "def custom_permissions\n if admin?\n can [:confirm_delete], ActiveFedora::Base\n can [:allow_downloads, :prevent_downloads], AdminSet\n\n can :manage, Spotlight::HomePage\n can :manage, Spotlight::Exhibit\n end\n\n can :read, Spotlight::HomePage\n can :read, Spotlight::Exhibit\n\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 'special_group'\n # can [:create], ActiveFedora::Base\n # end\n end", "def admin_only\n logged_in_as_admin? || admin_only_access_denied\n end", "def authorize_as_admin\n if current_user.nil?\n head :unauthorized\n elsif !current_user.is_admin?\n render json: { status: 200, msg: 'You do not have permission to delete this!!!' }\n end\n end", "def check_admin_only\n\t\t# Check permissions\n\t\tif (not @current_user.is_administrator?)\n\t\t\tredirect_to root_path, notice: \"Access Denied\"\n\t\t\treturn\n\t\tend\n\tend", "def only_authorize_admin!\n authorize!(is?(:admin))\n end", "def require_admin\n deny_wrong_user if !admin?\n end", "def administrator\n can :manage, :all\n end", "def admin_user\n redirect_to root_url, notice: \"You do not have permission to view or edit this information.\" unless current_user.admin?\n end", "def admin\n can :manage, :all\n end", "def admin_required\n current_user.is_admin? || access_denied\n end", "def is_admin?\n !self.permissions.empty?\n end", "def enforce_permissions\n bounce unless is_admin?\n end", "def check_permission\n redirect_to dashboard_path, notice: 'You are not authorised to perform this action.' 
unless current_user&.admin?\n end", "def admin_permission\n if session[:position].to_s == \"Secretary\" or\n session[:position].to_s == \"Treasurer\" or\n session[:position].to_s == \"Chairman\"\n flash[:notice] = \"RESTRICTED: you do not have access\"\n redirect_to controller: :access, action: :admin_menu, :id => session[:user_id],\n position: session[:position]\n return false\n end\n\n end", "def admin_member\n redirect_to root_path, notice: \"You are not authorized to do that\" if !current_member.admin?\n end", "def is_admin?\n\t\tself.permission_level >= 3\n\tend", "def authorize_admin\n\t\tauthorize( ADMIN_USER_LEVEL ) \n\tend", "def _admin_or_self\n not_authorizaed unless current_user.admin? || current_user == @user\n end", "def require_admin_permission\n redirect_to tables_path, notice: 'Necesita permisos de administrador para visualizar la configuracion' unless current_user_admin?\n end", "def deny_admin_suicide\n raise 'admin suicided' if User.count(&:admin) <= 1\n end", "def deny_admin_suicide\n raise 'admin suicided' if User.count(&:admin) <= 1\n end", "def authorize_admin_manage_users\n unless current_user.permission.manage_users\n redirect_back fallback_location: root_path\n end\n end", "def authorize_admin\n return unless !current_user.admin?\n redirect_to root_path, alert: 'Admins only!'\n end", "def authorize_admin\n return unless !current_user.admin?\n redirect_to root_path, alert: 'Admins only!'\n end", "def admin_user\n unless current_user && current_user.admin?\n redirect_to login_url, notice: \"admin can only do this action.\" \n end\n end", "def admin_required\n current_user.respond_to?('is_admin') && current_user.send('is_admin') || access_denied\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\n\n # Limits creating new objects to a specific group\n\n if user_groups.include? ['all_project_writers']\n can [:create], PulStore::Base\n can [:create], PulStore::Lae::Box\n can [:create], PulStore::Lae::Folder\n can [:create], Pulstore::Lae::HardDrive\n end\n\n if user_groups.include? ['lae_project_writers']\n can [:create], PulStore::Lae::Box\n can [:create], PulStore::Lae::Folder\n can [:create], Pulstore::Lae::HardDrive\n end \n\n if user_groups.include? ['all_project_writers']\n can [:destroy], PulStore::Base\n end\n\n if user_groups.include? 
['lae_project_readers', 'all_project_readers' ]\n can [:show], PulStore::Base\n end\n end", "def admin_only\n return if admin_user?\n\n add_message 'Insufficient permission to view page'\n redirect_to '/'\n end", "def admin_only\n unless current_user.admin?\n redirect_to :back, :alert => \"Access denied.\"\n end\n end", "def admin_only\n if !Volt.current_user.admin\n redirect_to '/login'\n end\n end", "def super_admin(user)\n can :manage, :all\n end", "def admin_only\n false\n end", "def authorize_admin\n return unless current_user.admin?\n redirect_to root_path, alert: 'Admins only!'\n end", "def check_admin_user\n unless current_user && current_user.privilege_admin?\n flash[:danger] = \"You do not have permission to perform this operation\"\n redirect_to root_path\n end\n end", "def authorize_user_to_delete\n post = Post.find(params[:id])\n unless current_user == post.user || current_user.admin?\n flash[:alert] = \"You must be an admin to do that.\"\n redirect_to [post.topic, post]\n end\n end", "def admin_only\n unless current_user.admin\n redirect_to home_path, notice: \n \"You must be an admin to perform that function!\"\n end\n end", "def check_permissions\n unless current_user.is_admin?\n redirect_to index_path, alert: 'You do not have the permissions to visit the admin page'\n end\n end", "def authorize_admin\n redirect_to root_path, notice: \"You don't have access to admin pages.\" if !current_user.admin?\n end", "def admin?\n permissions == 'admin'\n end", "def admin_only\n\t\t\tif logged_in?\n\t\t\t\tif User.find_by(id: current_user.id.to_i).admin != true\n\t\t\t\t\tredirect_to root_path, :alert => \"Odmowa dostępu musisz być adminem\"\n\t\t\t\tend\n\t\t\tend\n\t\tend", "def permission_required \n render_403 unless admin? || @item.is_editable_by?(current_user)\n end", "def admin_only\n @user = current_user\n if @user.role != \"admin\"\n redirect_to root_path\n end\n end", "def has_admin_access?\n admin? || moderator?\n end", "def admin\n #manager\n #include User,CmsRole\n can :manage, :all\n end", "def authorize_admin\n return unless !current_admin\n redirect_to root_path, alert: 'Admins only!'\n end", "def can_edit_admin_objects?\n in_role?('admin', 'super-user', 'releng') # pm?\n end", "def require_admin!\n return if user_signed_in? && current_user.admin?\n\n flash[:error] = t('flash.no_permission')\n redirect_to_root\n end", "def enforce_user_is_admin\n return if user_signed_in? && current_user.admin?\n raise CanCan::AccessDenied\n end", "def admin_check\n render_401 && return unless current_user\n render_403 && return unless current_user.admin?\n end", "def admin_only!\n\tif !current_user || !current_user.administrator\n\t\tredirect \"/\"\n\tend\nend", "def allow_if_admin\n unless is_admin?\n flash[:danger] = \"Administration permissions needed to access to this page\"\n redirect_to new_user_session_path\n end\n end", "def custom_permissions\n # Limits deleting objects to a the admin user\n #\n # if current_user.admin?\n # can [:destroy], ActiveFedora::Base\n # end\n\n # Limits creating new objects to a specific group\n #\n # if user_groups.include? 
'special_group'\n # can [:create], ActiveFedora::Base\n # end\n can [:create, :show, :add_user, :remove_user, :index, :edit, :update, :destroy], Role if current_user.admin?\n\n can [:fa_overview], ActiveFedora::Base\n can [:advanced], ActiveFedora::Base\n can [:streets], ActiveFedora::Base\n can [:pdf_page], ActiveFedora::Base\n can [:pdf_page_metadata], ActiveFedora::Base\n can [:bookreader], ActiveFedora::Base\n can [:imageviewer], ActiveFedora::Base\n can [:streetsviewer], ActiveFedora::Base\n can [:fa_series], ActiveFedora::Base\n can [:audio_transcriptonly], ActiveFedora::Base\n can [:video_transcriptonly], ActiveFedora::Base\n end", "def authorize_user\n post = Post.find(params[:id])\n\n unless current_user == post.user || current_user.admin?\n flash[:alert] = \"You must be an admin to do that.\"\n redirect_to [post.topic, post]\n end\n end", "def check_admin_permissions\n if !@current_user.has_role?(:backend)\n redirect_to root_path\n end\n end", "def admin?; current_user.admin?; end", "def admin?; current_user.admin?; end", "def user_is_admin\n unless logged_in? and is_admin?\n respond_with_error(\n \"You must have admin permissions to perform this action.\", \n root_path)\n end\n end", "def restrict_to_admin\n unless is_admin\n flash[:danger] = \"You are not an administrator.\"\n redirect_to root_url\n end\n end", "def must_be_admin\n if current_user.is? \"EA\"\n return true\n else\n redirect_to root_path,:notice =>\"Access Denied...\"\n end\n end", "def custom_permissions\n alias_action :show, :manifest, to: :read\n alias_action :color_pdf, :pdf, :edit, :browse_everything_files, :structure, :file_manager, to: :modify\n roles.each do |role|\n send \"#{role}_permissions\" if current_user.send \"#{role}?\"\n end\n end", "def custom_permissions\n alias_action :show, :manifest, to: :read\n alias_action :color_pdf, :pdf, :edit, :browse_everything_files, :structure, :file_manager, to: :modify\n roles.each do |role|\n send \"#{role}_permissions\" if current_user.send \"#{role}?\"\n end\n end", "def admin?( id )\n acl.has_permission?(id, AlbumACL::ADMIN_ROLE) || SystemRightsACL.singleton.has_permission?(id, SystemRightsACL::SUPPORT_HERO_ROLE)\n end", "def handle_admin_permissions()\n if !session[:debater].is_admin\n redirect_to(:controller => :debater, :action => :login_form, :message => \"must login as admin to make specified request\")\n return false\n end\n return true\n end", "def isAdmin?\n if session[:perm] == 1\n return true;\n else\n return false;\n end\n end", "def dont_delete_admin\n\t\t raise \"You cannot delete the last admin\" if self.id == 1 || User.count == 1\n\t end", "def authorize_as_admin\n raise(ExceptionHandler::AuthenticationError, Message.unauthorized) unless !current_user.nil? && current_user.is_admin?\n end", "def only_for_admins\n raise ActiveRecord::RecordNotFound unless current_user.has_role? 
:admin\n end", "def custom_permissions\n #Collection Manager Permissions\n #Higher power than edit user...[Dont want edit users to be able to DELETE a COLLECTION??, (Delete a DO?)]\n if current_user.applicable_policy?(SETTING_POLICY_COLLECTION_MANAGER)\n #Marked as being able to :manage_collection\n can :manage_collection_flag, :all\n can :create, [DRI::Batch, DRI::GenericFile]\n end\n\n\n #Admin Permissions\n if current_user.applicable_policy?(SETTING_POLICY_ADMIN)\n can :admin_flag, :all\n #Disabled for now..\n can :manage, :all\n end\n\n #Create_do flag (alias for :edit collection)\n can :create_do, String do |pid|\n test_create(pid)\n end\n\n can :create_do, DRI::Batch do |collection|\n test_create(collection)\n end\n end", "def require_permission\n post = Post.find_by_slug(params[:id])\n if current_user.id != post.user_id\n redirect_to user_post_path(post.user, post), notice: \"Why are you trying to edit something that isn't yours? ಠ_ಠ\"\n end\n end", "def admin_required\n self.current_user != :false && \n self.current_user.is_admin? ? true : access_denied\n end", "def is_admin\n test_access :admin\n end", "def admin_access?\n admin?\n end", "def is_admin\n render status: :unauthorized unless current_user.admin\n end", "def require_admin\n unless (@current_user && @current_user.is_admin?)\n set_notification_messages(I18n.t(\"authentication.permission_denied_heading\"), I18n.t(\"authentication.permission_denied_message\"), :error)\n redirect_to_sign_in_page\n return\n end\n end", "def check_permission\n unless current_user.is_admin == 1\n redirect_to \"/\", warning: \"You don't have permission to access that page.\"\n end\n end" ]
[ "0.7950692", "0.78213316", "0.7780422", "0.77437794", "0.7643459", "0.76227283", "0.7619482", "0.76084095", "0.76039666", "0.7603168", "0.7582445", "0.7582445", "0.7582445", "0.75734454", "0.7561273", "0.75477785", "0.75477785", "0.7531823", "0.7525776", "0.7477768", "0.74687177", "0.7430267", "0.7430267", "0.73938626", "0.73915625", "0.73789024", "0.73772717", "0.7363215", "0.73291594", "0.73184603", "0.72980475", "0.7294699", "0.7279691", "0.7269466", "0.7250847", "0.72443855", "0.7234198", "0.7233145", "0.72195286", "0.72006726", "0.7199909", "0.7197637", "0.7189384", "0.71881175", "0.7182162", "0.7180374", "0.7180374", "0.7164011", "0.7161767", "0.7161767", "0.715464", "0.7154026", "0.71533895", "0.71526307", "0.7146523", "0.7135932", "0.71189463", "0.7108414", "0.7104099", "0.71013874", "0.70943683", "0.70873183", "0.70839745", "0.7076661", "0.7067486", "0.7066406", "0.7055081", "0.7049891", "0.70478433", "0.70457834", "0.7044121", "0.704191", "0.70392376", "0.70329815", "0.7031396", "0.70278513", "0.7014402", "0.70115465", "0.70008606", "0.69946057", "0.69843495", "0.69843495", "0.6981938", "0.6978245", "0.69719446", "0.696724", "0.696724", "0.696722", "0.69620687", "0.6961093", "0.695865", "0.6952355", "0.6950827", "0.69483733", "0.6944201", "0.69414073", "0.69395655", "0.693516", "0.69324934", "0.69127417", "0.69122064" ]
0.0
-1
GET /visit_people
GET /visit_people.json
def index
  @visit_people = VisitPerson.all
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def people\n Sifter.\n get(api_people_url).\n fetch(\"people\", []).\n map { |p| Sifter::Person.new(p) }\n end", "def people\n Sifter.\n get(api_people_url).\n fetch(\"people\", []).\n map { |p| Sifter::Person.new(p) }\n end", "def people\n Birdman::Requester.get(\"movies/#{id}/people\")\n end", "def index\n\t\tall_people = Person.all.sort_by(&:id)\n\t\tif all_people\n\t\t\trender json: {people: all_people}\n\t\telse\n\t\t\trender body: 'People Not Found', status: 404\n\t\tend\n\tend", "def index\n @people = search Person.involved_in(@conference)\n\n respond_to do |format|\n format.html { @people = @people.paginate page: page_param }\n format.json\n end\n end", "def index\n \t@people = Person.all\n respond_to do |format|\n format.json { render json: @people, status: :ok }\n end\n end", "def fetch_people\n @people = People.find(params[:id])\n end", "def index\n @people = Person.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @people }\n end\n end", "def index\n @people = User.order(:nickname).page(params[:page])\n\n respond_to do |format|\n format.html # index.html.haml\n format.json { render :json => @people }\n end\n end", "def people\n Harvest::Resources::Person\n end", "def index\n\t\t@people = People.all\n\t\t#render json: \"test\"\n\t\tresponse = @people\n\t\trender json: response\n\t\treturn response\n\tend", "def people(query={})\n response = self.simple_client.get(\"/api/v1/people?#{query.to_query}\")\n # TODO modularize, re-use\n if response[\"page\"]\n return WillPaginate::Collection.create(response[\"page\"],response[\"per_page\"]) do |pager|\n pager.replace(response[\"people\"])\n pager.total_entries = response[\"total_entries\"]\n end\n else\n return response\n end\n end", "def show\n respond_to do |format|\n people = @mob.user_idz.split(',').map{|i| User.find(i).name }.join(', ')\n format.json { render json: @mob.attributes.merge(people: people, users: @mob.users, date: Time.now.strftime('%-m/%-d/%Y')) }\n end\n end", "def index\n @users = User.all\n render json: { users: @users }, methods: :visit_ids\n end", "def show\n render json: @visit\n end", "def get_people_info(page_size=2)\n\t\toptions = {\n\t\t\theaders: headers,\n\t\t\tquery: { page_size: page_size }\n\t\t}\n\t\tresult = self.class.get(\"/people/#{display_name}/#{email_address}/#{title}\", options)\n\tend", "def person(id)\n get(\"/catalog/people/#{id.to_s}\")\n end", "def index\n @people = Person.paginate(:page => params[:offset], :per_page => 20)\n\n people_hash()\n respond_to do |format|\n format.html\n format.json { render :json => @people_hash }\n format.xml { render :xml => @people_hash }\n end\n end", "def index\n respond_with(@people)\n end", "def index\n @projects_people = ProjectsPerson.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @projects_people }\n end\n end", "def show\n @person_info = PersonInfo.find(params[:id])\n\n render json: @person_info\n end", "def get_person(id)\n self.class.get(url(\"people/#{id}\"), headers: @token.headers).parsed_response\n end", "def index\n @people = People.all\n end", "def people\n success? ? 
@data['people'] : nil\n end", "def index\n @people = do_search_people(@person, :friends, :with_tags => !request.xhr?,\n :url => hash_for_contacts_path)\n end", "def search_people(query)\n get(\"/v1/search/people\", query: query)\n end", "def show\n @person = get_person(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = get_person(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n\t\tperson = Person.find_by_id(self.params[\"id\"].to_i)\n\t\tif person\n\t\t\trender json: {id: person.id, name: person.name, favoriteCity: person.favoriteCity}\n\t\telse\n\t\t\trender body: 'Person Not Found', status: 404\n\t\tend\n\tend", "def index\n @request_people = RequestPerson.all\n end", "def index\n @people = Person.includes(:registry).all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @people }\n end\n end", "def show\n\t\t@person = Person.find_by(id: params[:id])\n\t\t# render json: @person #skips the view, and just renders out the json\n\tend", "def index\n @people = Person.includes(:names).search(params[:search]).order(\"person_names.last_name ASC\")\n\n if params[:state] == 'in_project'\n @people = @people.joins(user: :memberships)\n end\n\n respond_to do |format|\n format.html { render layout: 'fluid' } # index.html.erb\n format.json { render json: @people }\n end\n \n end", "def show\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n format.json { render json: @visit }\n end\n end", "def view_people\r\n user = session[:user]\r\n @people = user.get_logged_in_users\r\n \r\n render_partial 'people', :people => @people\r\n \r\n rescue\r\n render_text ''\r\n end", "def get_people\n return @people\n end", "def show\n @user_person = UserPerson.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @user_person }\n end\n end", "def index\n # this is like a 'select * from people' in SQL, but using ORM\n @people = Person.all \n\n # render is the return of our method and at the same time help us to render the object\n render json: @people \n end", "def show\n @person = people_type.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def index\n\n @people = Person.filter(params.slice(:name_like))\n @people = @people.where([\"union_id = ?\", current_person.union_id]) if request.format.json? 
&& !owner?\n @people = @people.where(\"not invited_by_id is null\").order([:last_name, :first_name, :id])\n \n respond_to do |format|\n format.html \n format.json \n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.json { render :json => @person }\n end\n end", "def visited\n @per_page = params[:per_page] || (current_user.profile_viewed.per_page || 20)\n @profile_visitors = @users = current_user.profile_viewed.paginate(:per_page => @per_page, :page => params[:page], :select => \"users.*, profile_viewers.viewed_at\")\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @type_people = TypePerson.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @type_people }\n end\n end", "def show\n @person = Person.find(params[:id])\n @hair_colours = Person.get_hair_colours\n @eye_colours = Person.get_eye_colours\n @heights_feet = Person.get_heights_feet\n @heights_inches = Person.get_heights_inches\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n \t@person = Person.find(params[:id])\n respond_to do |format|\n format.json { render json: @person, status: :ok }\t\n end \t \t\n end", "def index\n @visits = VisitSearch.new.visits current_physician, params[:page]\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @visits }\n end\n end", "def index\n @people_tasks = TasksPerson.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @people_tasks }\n end\n end", "def index\n \t@people = Person.all\n end", "def show\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @visit }\n end\n end", "def index\n @people = Person.all\n respond_with(@people)\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n 
respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def index\n @person = Person.find((params[:person_id]))\n @person_identification_docs = PersonIdentificationDoc.where(\"person_id = ?\", params[:person_id])\n\n\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @person_identification_docs }\n end\n end", "def getPeople\n\t\t\t\tbegin\n\t\t\t\t\t# Call getPeopleData method to retrieve data from SalesLoft API\n\t\t\t\t\t# [TODO] LOG [DEBUG MESSAGE]\n\t\t\t\t\tresponse_body = getPeopleData\n\t\t\t\t\t\n\t\t\t\t\t# Return Response\n\t render json: {\n\t \tresponse_data: response_body,\n\t \toperation: \"get_people_data\",\n\t \tstatus: \"success\",\n\t \ttimestamp:Time.now, \n\t \tuuid: SecureRandom.uuid, \n\t \tresponse_code: 200,\n\t \tmessage: \"Data Retrieved\"\n\t }\n \trescue StandardError => e\n \t\t# [TODO] LOG [ERROR MESSAGE]\n \t\trender json: {\n\t \tresponse_data: e.message,\n\t \toperation: \"people_data\",\n\t \tstatus: \"error\",\n\t \ttimestamp:Time.now, \n\t \tuuid: SecureRandom.uuid, \n\t \tresponse_code: 500,\n\t \tmessage: \"Error Occured\"\n\t } \n \tend # End rescue block\n\t\t\tend", "def people(company_id, project_id=nil)\n url = project_id ? \"/projects/#{project_id}\" : \"\"\n url << \"/contacts/people/#{company_id}\"\n records \"person\", url\n end", "def people(company_id, project_id=nil)\n url = project_id ? \"/projects/#{project_id}\" : \"\"\n url << \"/contacts/people/#{company_id}\"\n records \"person\", url\n end", "def show\n @projects_person = ProjectsPerson.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @projects_person }\n end\n end", "def index\n @people = current_organization.people.all\n end", "def visitors\n @per_page = params[:per_page] || (current_user.profile_viewed_by.per_page || 20)\n @profile_visitors = @users = current_user.profile_viewed_by.paginate(:per_page => @per_page, :page => params[:page], :select => \"users.*, profile_viewers.viewed_at\")\n end", "def get\n @person\n end", "def people\n\t\tif logged_in? then\n\t\t\tredirect_to \"/people/#{current_user.login}/#{params[:peoplecontroller]}/#{params[:peopleaction]}\", :status => 301\n\t\telse\n\t\t\tredirect_to '/', :status => 301\n\t\tend\n\tend", "def show\n @waitlist = WaitList.find(params[:id])\n @person = Person.find(@waitlist.people_id)\n @profile = Profile.find_by_person_id(@waitlist.people_id)\n\n respond_to do |format|\n format.html # show.html.haml\n format.json { render json: @waitlist }\n end\n end", "def index\n\t\t@people = Person.all\n\n\t\trespond_to do |format|\n\t\t\tformat.html # index.html.erb\n\t\t\tformat.xml { render :xml => @people }\n\t\tend\n\tend", "def index\n activity = Activity.find_by(id: params[:id])\n\n @people = activity.activity_object.liked_by\n\n respond_to do |format|\n format.html { render layout: false if request.xhr? 
}\n end\n end", "def profiles \n personid = params[:id]\n @response = JSON.parse(current_user.access_token.token.get('/api/v0/aspects/profiles?ids=['+params[:id]+']'))\n respond_to do |format|\n format.html\n format.json {render :json=> @response, :callback=>params[:callback]}#{render json: @response}\n end\n end", "def find\n @people = Person.where params.slice(:given_name, :family_name, :family_name2, :city_village, :gender)\n\n case @people.size\n when 0\n if Site.master?\n head :not_found\n else\n# find_remote\n end\n when 1\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @people }\n format.json { render :json => @people.to_json }\n end\n else\n respond_to do |format|\n format.html { render :action => 'index', :status => :multiple_choices }\n format.xml { render :xml => @people, :status => :multiple_choices }\n format.json { render :json => @people.to_json, :status => :multiple_choices }\n end\n end\n end", "def show\n @persona = Persona.find(params[:id])\n @users = User.all\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @persona }\n end\n end", "def people\n @people = @user.followings\n respond_to do |format|\n format.js \n end \n end", "def show\n @personnage = Personnage.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @personnage }\n end\n end", "def index\n @root = \"people\"\n \n @people = Person.find(:all)\n end", "def index\n @people = Person.where( :user_id => current_user.id)\n if current_user.role? :admin \n @people = Person.all\n end\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @people }\n end\n end", "def index\n @team_people = TeamPerson.all\n end", "def index\n @people = Person.where( :user_id =>current_user.id)\n end", "def show\n @people = People.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @people }\n end\n end", "def index\n @likes = target.likes.includes(:author => :profile)\n @people = @likes.map(&:author)\n\n respond_to do |format|\n format.all { render :layout => false }\n format.json { render :json => @likes.as_api_response(:backbone) }\n end\n end", "def index\n render json: Visitor.paginate(:page => params[:page], :per_page => 300)\n end", "def show\n @person_interest = PersonInterest.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person_interest }\n end\n end", "def show\n @person = authorize Person.find(params[:id])\n @view_model = PersonViewModel.new(current_user, @person, @conference)\n\n respond_to do |format|\n format.html\n format.json\n end\n end", "def show\n if @person\n render json: @person\n else\n # :not_found is the http status code 404\n render status: :not_found\n end\n end", "def show\n @person = Person.find_by_guid!(params[:id])\n\n respond_to do |format|\n format.json { render :json => PersonPresenter.new(@person, current_user) }\n end\n end", "def show\n @user = User.find_by_name(params[:name])\n @participants = Participant.find_all_by_user_id(@user.id).paginate(:page => params[:page], :per_page => 5)\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @user }\n end\n end", "def index\n @people = Person.order(name: :asc).page(params[:page])\n end", "def index\n @people = Person.all\n respond_to do |format|\n format.json { \n render json: @people, :include => [:roles => { include: [:privileges] } ]\n }\n end\n end" ]
[ "0.7164006", "0.7164006", "0.69264215", "0.6859905", "0.6818042", "0.66891676", "0.66564393", "0.6645254", "0.6614189", "0.65685946", "0.65561116", "0.654978", "0.64583695", "0.63975155", "0.6370652", "0.63647944", "0.6357036", "0.6350222", "0.6349708", "0.63458526", "0.63298404", "0.63177335", "0.63159275", "0.6314525", "0.6300666", "0.62753123", "0.6266541", "0.6266541", "0.62502676", "0.6245455", "0.62356114", "0.62254244", "0.6217118", "0.6213225", "0.62108594", "0.6195395", "0.6194918", "0.6194776", "0.6186741", "0.6174983", "0.61688787", "0.6155821", "0.614131", "0.614131", "0.614131", "0.614131", "0.614131", "0.614131", "0.614131", "0.614131", "0.614131", "0.611052", "0.6104651", "0.60725176", "0.60723203", "0.60700715", "0.6065283", "0.60643166", "0.60578537", "0.6056492", "0.60544103", "0.60544103", "0.60544103", "0.60544103", "0.60544103", "0.60544103", "0.60544103", "0.60544103", "0.60544103", "0.60449034", "0.6044448", "0.6042804", "0.6042804", "0.6035092", "0.6011011", "0.59908575", "0.5976932", "0.5973838", "0.59565884", "0.59560806", "0.5954836", "0.5937235", "0.5927051", "0.59211016", "0.5917182", "0.5916097", "0.5914424", "0.59059817", "0.5896421", "0.5891084", "0.58892643", "0.58763814", "0.5873563", "0.58671194", "0.583927", "0.5827516", "0.5826395", "0.582373", "0.5823241", "0.5815234" ]
0.7042639
2
GET /visit_people/1
GET /visit_people/1.json
def show
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index\n @visit_people = VisitPerson.all\n end", "def people\n Sifter.\n get(api_people_url).\n fetch(\"people\", []).\n map { |p| Sifter::Person.new(p) }\n end", "def people\n Sifter.\n get(api_people_url).\n fetch(\"people\", []).\n map { |p| Sifter::Person.new(p) }\n end", "def person(id)\n get(\"/catalog/people/#{id.to_s}\")\n end", "def fetch_people\n @people = People.find(params[:id])\n end", "def get_person(id)\n self.class.get(url(\"people/#{id}\"), headers: @token.headers).parsed_response\n end", "def show\n @person_info = PersonInfo.find(params[:id])\n\n render json: @person_info\n end", "def people\n Birdman::Requester.get(\"movies/#{id}/people\")\n end", "def index\n\t\tall_people = Person.all.sort_by(&:id)\n\t\tif all_people\n\t\t\trender json: {people: all_people}\n\t\telse\n\t\t\trender body: 'People Not Found', status: 404\n\t\tend\n\tend", "def show\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n format.json { render json: @visit }\n end\n end", "def index\n \t@people = Person.all\n respond_to do |format|\n format.json { render json: @people, status: :ok }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.json { render :json => @person }\n end\n end", "def show\n\t\t@person = Person.find_by(id: params[:id])\n\t\t# render json: @person #skips the view, and just renders out the json\n\tend", "def show\n @person = get_person(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = get_person(params[:id])\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def index\n @people = Person.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @people }\n end\n end", "def index\n @people = search Person.involved_in(@conference)\n\n respond_to do |format|\n format.html { @people = @people.paginate page: page_param }\n format.json\n end\n end", "def show\n\t\tperson = Person.find_by_id(self.params[\"id\"].to_i)\n\t\tif person\n\t\t\trender json: {id: person.id, name: person.name, favoriteCity: person.favoriteCity}\n\t\telse\n\t\t\trender body: 'Person Not Found', status: 404\n\t\tend\n\tend", "def show\n @user_person = UserPerson.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @user_person }\n end\n end", "def show\n respond_to do |format|\n people = @mob.user_idz.split(',').map{|i| User.find(i).name }.join(', ')\n format.json { render json: @mob.attributes.merge(people: people, users: @mob.users, date: Time.now.strftime('%-m/%-d/%Y')) }\n end\n end", "def show\n @person = people_type.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def index\n @people = User.order(:nickname).page(params[:page])\n\n respond_to do |format|\n format.html # index.html.haml\n format.json { render :json => @people }\n end\n end", "def show\n render json: @visit\n end", "def show\n \t@person = Person.find(params[:id])\n respond_to do |format|\n format.json { render json: @person, status: :ok }\t\n end \t \t\n end", "def show\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @visit }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def 
show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @person = Person.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def index\n\t\t@people = People.all\n\t\t#render json: \"test\"\n\t\tresponse = @people\n\t\trender json: response\n\t\treturn response\n\tend", "def index\n @users = User.all\n render json: { users: @users }, methods: :visit_ids\n end", "def show\n @projects_person = ProjectsPerson.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @projects_person }\n end\n end", "def people\n Harvest::Resources::Person\n end", "def index\n @projects_people = ProjectsPerson.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @projects_people }\n end\n end", "def show\n @person = Person.find_by_guid!(params[:id])\n\n respond_to do |format|\n format.json { render :json => PersonPresenter.new(@person, current_user) }\n end\n end", "def get\n @person\n end", "def show\n @personnage = Personnage.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @personnage }\n end\n end", "def index\n @person = Person.find((params[:person_id]))\n @person_identification_docs = PersonIdentificationDoc.where(\"person_id = ?\", params[:person_id])\n\n\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render :json => @person_identification_docs }\n end\n end", "def show\n @person_interest = PersonInterest.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person_interest }\n end\n end", "def people(query={})\n response = self.simple_client.get(\"/api/v1/people?#{query.to_query}\")\n # TODO modularize, re-use\n if response[\"page\"]\n return WillPaginate::Collection.create(response[\"page\"],response[\"per_page\"]) do |pager|\n pager.replace(response[\"people\"])\n pager.total_entries = response[\"total_entries\"]\n end\n else\n return response\n end\n end", "def show\n @person = Person.find(params[:id])\n @hair_colours = Person.get_hair_colours\n @eye_colours = Person.get_eye_colours\n @heights_feet = Person.get_heights_feet\n @heights_inches = Person.get_heights_inches\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @waitlist = WaitList.find(params[:id])\n @person = Person.find(@waitlist.people_id)\n @profile = 
Profile.find_by_person_id(@waitlist.people_id)\n\n respond_to do |format|\n format.html # show.html.haml\n format.json { render json: @waitlist }\n end\n end", "def index\n @people = Person.includes(:registry).all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @people }\n end\n end", "def index\n respond_with(@people)\n end", "def index\n @people = People.all\n end", "def index\n\n @people = Person.filter(params.slice(:name_like))\n @people = @people.where([\"union_id = ?\", current_person.union_id]) if request.format.json? && !owner?\n @people = @people.where(\"not invited_by_id is null\").order([:last_name, :first_name, :id])\n \n respond_to do |format|\n format.html \n format.json \n end\n end", "def index\n # this is like a 'select * from people' in SQL, but using ORM\n @people = Person.all \n\n # render is the return of our method and at the same time help us to render the object\n render json: @people \n end", "def get_people_info(page_size=2)\n\t\toptions = {\n\t\t\theaders: headers,\n\t\t\tquery: { page_size: page_size }\n\t\t}\n\t\tresult = self.class.get(\"/people/#{display_name}/#{email_address}/#{title}\", options)\n\tend", "def index\n @type_people = TypePerson.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @type_people }\n end\n end", "def show\n @person = authorize Person.find(params[:id])\n @view_model = PersonViewModel.new(current_user, @person, @conference)\n\n respond_to do |format|\n format.html\n format.json\n end\n end", "def show\n @person = Person.find(params[:id])\n @registry = Registry.where('email = ?', @person.email).first\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person }\n end\n end", "def index\n @request_people = RequestPerson.all\n end", "def show\n @persona = Persona.find(params[:id])\n @users = User.all\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @persona }\n end\n end", "def show\n @persona = Persona.find(params[:id])\n\n respond_to do |format|\n format.json { render json: @persona }\n end\n end", "def show\n if @person\n render json: @person\n else\n # :not_found is the http status code 404\n render status: :not_found\n end\n end", "def index\n @people = Person.paginate(:page => params[:offset], :per_page => 20)\n\n people_hash()\n respond_to do |format|\n format.html\n format.json { render :json => @people_hash }\n format.xml { render :xml => @people_hash }\n end\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @people = Person.all\n end", "def index\n @visits = VisitSearch.new.visits current_physician, params[:page]\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @visits }\n end\n end", "def show\n @person = User.friendly.find(params[:id])\n add_breadcrumb @person.nickname, cooperative.person_path(@person)\n\n respond_to do |format|\n format.html # show.html.haml\n format.json { render :json => @person }\n end\n end", "def people\n success? ? 
@data['people'] : nil\n end", "def index\n @people_tasks = TasksPerson.all\n\n respond_to do |format|\n format.html # index.html.erb\n format.json { render json: @people_tasks }\n end\n end", "def create\n @visit = Visit.new(params[:visit])\n\n respond_to do |format|\n if @visit.save\n format.html {\n redirect_to :controller => 'people', :action => 'edit', :id => @visit.person_id\n }\n format.json { render json: @visit, status: :created, location: @visit }\n else\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def profiles \n personid = params[:id]\n @response = JSON.parse(current_user.access_token.token.get('/api/v0/aspects/profiles?ids=['+params[:id]+']'))\n respond_to do |format|\n format.html\n format.json {render :json=> @response, :callback=>params[:callback]}#{render json: @response}\n end\n end", "def index\n @people = Person.includes(:names).search(params[:search]).order(\"person_names.last_name ASC\")\n\n if params[:state] == 'in_project'\n @people = @people.joins(user: :memberships)\n end\n\n respond_to do |format|\n format.html { render layout: 'fluid' } # index.html.erb\n format.json { render json: @people }\n end\n \n end", "def show\n @person_search = PersonSearch.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @person_search }\n end\n end", "def get_people\n return @people\n end", "def index\n \t@people = Person.all\n end", "def index\n @people = Person.all\n end", "def show\n @person = Person.find(params[:id])\n end", "def show\n @person = Person.find(params[:id])\n end", "def show\n @person = Person.find(params[:id])\n end", "def show\n @person = Person.find(params[:id])\n end", "def show\n @tasks_person = TasksPerson.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @tasks_person }\n end\n end", "def show\n @site = Site.find(params[:site_id])\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @visit }\n end\n end", "def people(company_id, project_id=nil)\n url = project_id ? \"/projects/#{project_id}\" : \"\"\n url << \"/contacts/people/#{company_id}\"\n records \"person\", url\n end", "def people(company_id, project_id=nil)\n url = project_id ? 
\"/projects/#{project_id}\" : \"\"\n url << \"/contacts/people/#{company_id}\"\n records \"person\", url\n end", "def show\n @user = User.find_by_name(params[:name])\n @participants = Participant.find_all_by_user_id(@user.id).paginate(:page => params[:page], :per_page => 5)\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @user }\n end\n end", "def show\n @persona = Persona.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @persona }\n end\n end", "def show\n @visitation = Visitation.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @visitation }\n end\n end", "def show\n @type_person = TypePerson.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @type_person }\n end\n end", "def index\n @people = Person.all\n respond_with(@people)\n end", "def show\n @personerium = Personerium.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @personerium }\n end\n end", "def show\n @personaje = Personaje.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.json { render json: @personaje }\n end\n end", "def new\n @person = people_type.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @person }\n end\n end", "def show\n @people = People.find(params[:id])\n\n respond_to do |format|\n format.html # show.html.erb\n format.xml { render :xml => @people }\n end\n end", "def new\n @person = Person.new\n @hair_colours = Person.get_hair_colours\n @eye_colours = Person.get_eye_colours\n @heights_feet = Person.get_heights_feet\n @heights_inches = Person.get_heights_inches\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @person }\n end\n end", "def set_visit_person\n @visit_person = VisitPerson.find(params[:id])\n end", "def view_people\r\n user = session[:user]\r\n @people = user.get_logged_in_users\r\n \r\n render_partial 'people', :people => @people\r\n \r\n rescue\r\n render_text ''\r\n end" ]
[ "0.6920599", "0.6836075", "0.6836075", "0.6777743", "0.67722195", "0.6764108", "0.6759153", "0.67513764", "0.6732482", "0.6655684", "0.6647889", "0.66406447", "0.66394913", "0.6625896", "0.6625896", "0.66253173", "0.65987176", "0.6595223", "0.6590298", "0.6584921", "0.6571143", "0.65467346", "0.65271306", "0.65127766", "0.6471093", "0.6459942", "0.6459942", "0.6459942", "0.6459942", "0.6459942", "0.6459942", "0.6459942", "0.6459942", "0.6459942", "0.64317167", "0.6417889", "0.63918424", "0.63732284", "0.63495827", "0.6346765", "0.6340985", "0.63177145", "0.63163495", "0.6276577", "0.6228637", "0.6224973", "0.6224602", "0.62199295", "0.6215251", "0.6205197", "0.6201378", "0.61892617", "0.61801505", "0.61723304", "0.6148831", "0.613755", "0.61278355", "0.61268973", "0.612428", "0.6112449", "0.6096631", "0.6082858", "0.6082858", "0.6082858", "0.6082858", "0.6082858", "0.6082858", "0.6082858", "0.6082858", "0.6082858", "0.607945", "0.6076725", "0.60739964", "0.60620767", "0.60431635", "0.6040325", "0.60139006", "0.60088164", "0.60082734", "0.6007145", "0.59991", "0.59959286", "0.59959286", "0.59959286", "0.59959286", "0.59918886", "0.59886897", "0.5986006", "0.5986006", "0.59668374", "0.59660155", "0.59625906", "0.5959108", "0.59560937", "0.59540963", "0.5950117", "0.5935803", "0.59179157", "0.59111464", "0.5902835", "0.58978325" ]
0.0
-1
POST /visit_people POST /visit_people.json
def create @visit_person = VisitPerson.new(visit_person_params) respond_to do |format| if @visit_person.save format.html { redirect_to @visit_person, notice: 'Visit person was successfully created.' } format.json { render :show, status: :created, location: @visit_person } else format.html { render :new } format.json { render json: @visit_person.errors, status: :unprocessable_entity } end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create\n @visit = Visit.new(params[:visit])\n\n respond_to do |format|\n if @visit.save\n format.html {\n redirect_to :controller => 'people', :action => 'edit', :id => @visit.person_id\n }\n format.json { render json: @visit, status: :created, location: @visit }\n else\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = current_user.created_people.new(person_params_with_school)\n if @person.save\n render :show, status: :created, location: api_v1_person_url(@person)\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n end", "def create\n @person = current_user.created_people.new(person_params_with_school)\n if @person.save\n render :show, status: :created, location: api_v2_person_url(@person)\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n end", "def create\n @people = People.new(people_params)\n\n respond_to do |format|\n if @people.save\n format.html { redirect_to root_path, notice: 'Um VIP ' + @people.name.to_s + ' foi criado com sucesso!' }\n format.json { render :show, status: :created, location: @people }\n else\n format.html { render :new }\n format.json { render json: @people.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n if @person.save\n render :show, status: :created, location: api_v2_person_url(@person)\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n end", "def create\n \n# next line commented out and following line added 2 aug 17 for connecting users to people (step 17)\n# Person.create(person_params)\n current_user.people.create(person_params) # check nomster/flixter code in this area\n\n redirect_to new_person_path # change to redirect to page showing person created, maybe entire tree\n end", "def create\n @person_info = PersonInfo.new(person_info_params(params[:person_info]))\n\n if @person_info.save\n render json: @person_info, status: :created, location: @person_info\n else\n render json: @person_info.errors, status: :unprocessable_entity\n end\n end", "def visit_person_params\n params.require(:visit_person).permit(:visit_id, :person_id)\n end", "def create\n @person = @account.people.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html do\n redirect_to people_url, notice: 'Person was successfully created.'\n end\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json do\n render json: @person.errors, status: :unprocessable_entity\n end\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to people_path, notice: 'Person was successfully created.' 
}\n format.json { render action: 'show', status: :created, location: @person }\n else\n format.html { render action: 'new' }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @people = People.new(people_params) \n @people.user_created_id = current_user.id \n respond_to do |format|\n if @people.save\n format.html { redirect_to @people, notice: 'Persona creada con éxito.' }\n format.json { render :show, status: :created, location: @people }\n else\n format.html { render :new }\n format.json { render json: @people.errors, status: :unprocessable_entity }\n end\n end \n end", "def create\n @person = Person.new(person_params)\n\n if @person.save\n render json: { status: 'POST Success', id: @person.id }, status: :ok\n else\n render json: { status: 'Error', message:'Error registering a new person', person: @person.errors }, status: :unprocessable_entity\n end\n end", "def create\n @user_person = UserPerson.new(params[:user_person])\n\n respond_to do |format|\n if @user_person.save\n format.html { redirect_to @user_person, notice: 'User person was successfully created.' }\n format.json { render json: @user_person, status: :created, location: @user_person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @user_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n \t@person = Person.new(person_params) \t\n respond_to do |format|\n if @person.save\n format.json {render json: @person, status: :ok }\n else\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new\n \tpopulate_attributes(@person, params[:person])\n \n respond_to do |format|\n if @person.save\n @person = Person.find(@person.id)\n populate_attributes(@person, params[:person])\n @person.save\n\n format.html { redirect_to @person, notice: 'Person was successfully created.' 
}\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @people = People.new(params[:people])\n\n respond_to do |format|\n if @people.save\n flash[:notice] = 'People was successfully created.'\n format.html { redirect_to(@people) }\n format.xml { render :xml => @people, :status => :created, :location => @people }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @people.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n @person.organization = @organization\n\n respond_to do |format|\n if @person.save\n @person.services << Service.new(:service_type_id => 'MEMBERSHIP', :paid => true) if params[:membership]\n @person.services << Service.new(:service_type_id => 'EAB', :paid => true) if params[:eab]\n @person.visits << Visit.new if params[:visiting]\n \n flash[:notice] = 'Person was successfully created.'\n format.html do\n if params[:visiting]\n redirect_to today_visits_path\n else\n redirect_to(person_path(:id => @person))\n end\n end\n format.xml { render :xml => @person, :status => :created, :location => @person }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n filter_sampled_persons_ineligibilties\n\n @person = Person.new(params[:person])\n @provider = Provider.find(params[:provider_id]) unless params[:provider_id].blank?\n\n respond_to do |format|\n if @person.save\n create_relationship_to_participant\n\n path = people_path\n msg = 'Person was successfully created.'\n if @provider\n path = provider_path(@provider)\n msg = \"Person was successfully created for #{@provider}.\"\n end\n format.html { redirect_to(path, :notice => msg) }\n format.json { render :json => @person }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @person.errors }\n end\n end\n end", "def create\n @politically_exposed_person = current_user.politically_exposed_people\n .build(politically_exposed_person_params)\n\n respond_to do |format|\n if @politically_exposed_person.save\n format.html { redirect_to politically_exposed_people_url,\n notice: 'Politically exposed person was successfully created.' }\n format.json { render :show, status: :created, location: @politically_exposed_person }\n else\n format.html { render :new }\n format.json { render json: @politically_exposed_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @users_visit = Users::Visit.new(users_visit_params)\n\n respond_to do |format|\n if @users_visit.save\n format.html { redirect_to @users_visit, notice: 'Visit was successfully created.' 
}\n format.json { render :show, status: :created, location: @users_visit }\n else\n format.html { render :new }\n format.json { render json: @users_visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n \n #save name(s)\n #params[:name].each {|name| \n # @name = Name.new\n # @name.name = name\n # @name.person_id = @person.id\n # @name.save\n #}\n\n respond_to do |format|\n if @person.save\n flash[:notice] = 'Person was successfully created.'\n format.html { redirect_to(@person) }\n format.xml { render :xml => @person, :status => :created, :location => @person }\n else\n format.html { render :action => \"new\", :layout => \"main\" }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: \"#{@person.name} was successfully created.\" }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' 
}\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person.save\n respond_with(@person, :location => people_path)\n end", "def create \n @person = Person.new(person_params)\n respond_to do |format|\n if @person.save!\n format.html { redirect_to @person, notice: 'Person was successfully created.' 
}\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n logger.debug(\"PeopleController.create: person_params: #{person_params.inspect}\")\n\n @person = Person.new(person_params)\n logger.debug(\"PeopleController.create: @person: #{@person.inspect}\")\n\n respond_to do |format|\n if @person.save \n # TBD: setup specs for this: or we will be adding to our 'technical debt'!\n Notifier.notice(@person, 'new account').deliver\n format.html do\n sign_in @person\n flash[:success] = \"Welcome to Talk Invite!\"\n redirect_to @person\n end\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n\n end", "def create\r\n @person = Person.new(person_params)\r\n set_field\r\n\r\n respond_to do |format|\r\n if @person.save\r\n format.json { render :show, status: :created, location: @person }\r\n else\r\n format.html { render :new }\r\n format.json { render json: @person.errors, status: :unprocessable_entity }\r\n end\r\n end\r\n end", "def create\n @person = Person.new(params[:person])\n\n respond_to do |format|\n if @person.save\n track_activity @person\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render layout: 'form', action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render action: 'show', status: :created, location: @person }\n else\n format.html { render action: 'new' }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n \n respond_to do |format|\n if @person.save\n format.html { redirect_to \"people/#{@person.id}\" }\n format.json { render :nothing }\n format.xml { render :nothing }\n else \n format.html { render :action => \"new\" }\n end\n end\n end", "def create\n\t\t@person = Person.create(person_params)\n\t\tredirect_to person_url(@person)\n\tend", "def create\n @personnage = Personnage.new(params[:personnage])\n\n respond_to do |format|\n if @personnage.save\n format.html { redirect_to @personnage, notice: 'Personnage was successfully created.' 
}\n format.json { render json: @personnage, status: :created, location: @personnage }\n else\n format.html { render action: \"new\" }\n format.json { render json: @personnage.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.js {}\n format.html { redirect_to people_url, notice: \"Person #{@person.first_name} was successfully created.\" }\n format.json { render :show, status: :created, location: @person }\n else\n format.js {}\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n current_organization.people << @person\n if @person.save\n redirect_to people_path, notice: @person.name.to_s + ' was successfully created.'\n else\n render action: \"new\"\n end\n end", "def create\n @team_person = TeamPerson.new(team_person_params)\n\n respond_to do |format|\n if @team_person.save\n format.html { redirect_to @team_person, notice: 'Team person was successfully created.' }\n format.json { render :show, status: :created, location: @team_person }\n else\n format.html { render :new }\n format.json { render json: @team_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def index\n @visit_people = VisitPerson.all\n end", "def create\n @request_person = RequestPerson.new(request_person_params)\n\n respond_to do |format|\n if @request_person.save\n format.html { redirect_to @request_person, notice: 'Request person was successfully created.' }\n format.json { render :show, status: :created, location: @request_person }\n else\n format.html { render :new }\n format.json { render json: @request_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @persona = Persona.new(params[:persona])\n \n respond_to do |format|\n if @persona.save\n format.json { render :json => @persona,\n :status => :created, :location => @persona }\n else\n format.json { render :json => @persona.errors,\n :status => :unprocessable_entity }\n end\n end\n end", "def create\n @person_interest = PersonInterest.new(params[:person_interest])\n\n respond_to do |format|\n if @person_interest.save\n format.html { redirect_to @person_interest, notice: 'Person interest was successfully created.' }\n format.json { render json: @person_interest, status: :created, location: @person_interest }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person_interest.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n\t\t@person = Person.new unless @current_user\n\t\[email protected] = session[:cas_user] unless @current_user\n\t\tif @current_user || @person.update_attributes(params[:person])\n\t\t\t#Let's check to see if they have any recommended people they match. If so, send them there, otherwise take them away\n\t\t\t@person ||= @current_user\n\t\t\t@matches = @person.similar_to_me\n\t\t\tif @matches.length > 0\n\t\t\t\trender :new_step2\n\t\t\telse\n\t\t\t\turl = session[:user_flow_entry]\n\t\t\t\tsession[:user_flow_entry] = nil\n\t\t\t\turl ||= dashboard_path\n\t\t\t\tredirect_to url, :notice => \"Profile created successfully. Enjoy the new site! 
And if you need help, check out the learn tab above.\"\n\t\t\tend\n\t\telse\n\t\t\tflash.now[:error] = \"There was an error with the data you entered, please try again!\"\n\t\t\trender :new\n\t\tend\n\tend", "def create\n @person = Person.new(person_params)\n # @person.addresses.build()\n respond_to do |format|\n if @person.save\n # @person.addresses.create\n # @person.companies.create\n\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n @addresses = @person.addresses\n @page_title = 'Add a new person'\n addresses = @person.addresses.build\n companies = @person.companies.build\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n\t\t@person = Person.new(person_params)\n\n\t\trespond_to do |format|\n\t\t\tif @person.save\n\t\t\t\tformat.html { redirect_to @person, notice: 'Person was successfully created.' }\n\t\t\t\tformat.json { render :show, status: :created, location: @person }\n\t\t\telse\n\t\t\t\tformat.html { render :new }\n\t\t\t\tformat.json { render json: @person.errors, status: :unprocessable_entity }\n\t\t\tend\n\t\tend\n\tend", "def create\n authorize!(:create,@user) unless @user.has_access?('People & Legal Team')\n data = params\n matter_ppl_data = data[:matter_people]\n matter_ppl_data.merge!({\n :created_by_user_id => current_user.id,\n :company_id => get_company_id\n })\n @matter_people = @matter.matter_peoples.new(matter_ppl_data)\n @matter_people_client = @matter_people\n filter_employees\n @matter_people.is_active = true\n @matter_people_other = @matter_people\n @matter_people_opposite = @matter_people\n @matter_people_client_representative = @matter_people\n respond_to do |format|\n if @matter_people.save(matter_ppl_data)\n flash[:notice] = \"#{t(:text_matter_people)} \" \"#{t(:flash_was_successful)} \" \"#{t(:text_created)}\"\n format.html { redirect_to matter_matter_peoples_path(@matter) }\n format.xml { head :ok }\n else\n format.html { redirect_to matter_matter_peoples_path(@matter) }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n \n if @person.save\n redirect_to action: 'list'\n else\n render action: 'new'\n end\n end", "def create(data)\n data.each do |response|\n puts person = @person_repository.create_or_find(response)\n homeworld_response = StarwarsService.get_response(response[\"homeworld\"])\n planet = @planet_repository.find(homeworld_response[\"name\"]).first\n person.planet_id = planet.id\n\n if response[\"species\"].empty? == false\n species_response = StarwarsService.get_response(response[\"species\"].first)\n specie = @specie_repository.find(species_response[\"name\"]).first\n person.specie_id = specie.id\n end\n person.save\n end\n end", "def create\n @person = Person.new(person_params)\n authorize! 
:create, @person\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: t('.create_ok') }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n @heights_feet = Person.get_heights_feet\n @heights_inches = Person.get_heights_inches\n @hair_colours = Person.get_hair_colours\n @eye_colours = Person.get_eye_colours\n\n respond_to do |format|\n if @person.save\n @person.skill_list = params[:person][:skill_list]\n @person.credit_list = params[:person][:credit_list]\n @person.image_upload = params[:person][:image_upload]\n @person.save\n format.html { redirect_to people_url,\n notice: \"Cast member #{@person.full_name} was successfully created\" }\n format.json { render json: @person,\n status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @guest_visit = GuestVisit.new(guest_visit_params)\n\n respond_to do |format|\n if @guest_visit.save\n format.html { redirect_to @guest_visit, notice: 'Guest visit was successfully created.' }\n format.json { render :show, status: :created, location: @guest_visit }\n else\n format.html { render :new }\n format.json { render json: @guest_visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n if params['csv_file'].present?\n require 'csv'\n params[:people] = []\n csv_text = File.read(params['csv_file'].path)\n csv = CSV.parse(csv_text, :headers => true)\n csv.each do |row|\n params[:people] << row.to_hash\n end\n people = Person.create(people_params[:people])\n render json: people.as_json(methods: :error_messages), status: :ok\n elsif params['person'].present?\n person = Person.new(person_params)\n if person.save\n render json: person.as_json(methods: :error_messages), status: :ok\n else\n render json: { errors: person.error_messages }, status: :unprocessable_entity\n end\n else\n render json: { message: 'CSV file not found.' }, status: :not_found and return\n end\n end", "def new\n @person = people_type.new\n\n respond_to do |format|\n format.html # new.html.erb\n format.json { render json: @person }\n end\n end", "def create\n @person = Person.new(params[:person].merge :creator_site_id => Site.current_id)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to(@person, :notice => 'Person was successfully created.') }\n format.xml { render :xml => @person, :status => :created, :location => @person }\n format.json { render :json => @person, :status => :created, :location => @person }\n else\n status = @person.errors.delete(:status) || :unprocessable_entity\n\n format.html { render :action => 'new' }\n format.xml { render :xml => @person.errors, :status => status }\n format.json { render :json => @person.errors, :status => status }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to admin_person_url(@person), notice: 'Person was successfully created.' 
}\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def people_params\n params.permit(people: [:first_name, :last_name, :email, :phone])\n end", "def create\n @related_person = RelatedPerson.new(related_person_params)\n\n respond_to do |format|\n if @related_person.save\n format.html { redirect_to @related_person, notice: 'Related person was successfully created.' }\n format.json { render :show, status: :created, location: @related_person }\n else\n format.html { render :new }\n format.json { render json: @related_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @people_number = PeopleNumber.new(people_number_params)\n\n respond_to do |format|\n if @people_number.save\n format.html { redirect_to @people_number, notice: 'People number was successfully created.' }\n format.json { render :show, status: :created, location: @people_number }\n else\n format.html { render :new }\n format.json { render json: @people_number.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @projects_person = ProjectsPerson.new(params[:projects_person])\n\n respond_to do |format|\n if @projects_person.save\n format.html { redirect_to @projects_person, notice: 'Projects person was successfully created.' }\n format.json { render json: @projects_person, status: :created, location: @projects_person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @projects_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @visit = Visit.new(params[:visit])\n\n respond_to do |format|\n if @visit.save\n format.html { redirect_to @visit, notice: 'Visit was successfully created.' }\n format.json { render json: @visit, status: :created, location: @visit }\n else\n format.html { render action: \"new\" }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = current_user.people.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to [:logbook, @person], notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n format.js\n else\n format.html { redirect_to logbook_people_path, notice: 'Person was not saved.' 
}\n format.json { render json: @person.errors, status: :unprocessable_entity }\n format.js { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: 'Клиент создан' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(person_params)\n\n if @person.save\n redirect_to :action => 'list'\n else\n render :action => 'new'\n end\n end", "def create\n @person = Person.new(person_params)\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to @person, notice: \"Person was successfully created.\" }\n flash.alert = \"Person updated\"\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new, status: :unprocessable_entity }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create_team_member\n clinics = params[:clinics].split(\", \")\n\n team_member = TeamMember.create(\n email: params[:email],\n first_name: params[:first_name],\n last_name: params[:last_name],\n user: params[:user],\n status: \"Active\",\n role: params[:role]\n )\n\n clinics.each do |clinic|\n Clinic.find(clinic).team_members << team_member\n end\n\n render json: team_member, include: ['clinics']\n end", "def create\n flash[:notice] = \"Visit was successfully created.\" if visit.save\n respond_with(visit)\n end", "def create\n @person = Person.new(params[:person])\n work = Work.find(@person.work_id) if @person.work_id.present?\n expression = Expression.find(@person.expression_id) if @person.expression_id.present?\n manifestation = Manifestation.find(@person.manifestation_id) if @person.manifestation_id.present?\n\n respond_to do |format|\n if @person.save\n if work\n @person.works << work\n end\n if expression\n @person.expressions << expression\n end\n if manifestation\n @person.manifestations << manifestation\n end\n format.html { redirect_to @person, notice: 'Person was successfully created.' }\n format.json { render json: @person, status: :created, location: @person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n authorize! :edit, Visit\n @visit = Visit.new(visit_params)\n @visit.assigner = current_user\n respond_to do |format|\n if @visit.save\n format.html { redirect_to @visit, falsh: { success: 'Visit was successfully created.' 
} }\n format.json { render json: @visit}\n else\n format.html { render :new }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new(params[:person])\n\n respond_to do |format|\n if @person.save\n format.js\n format.json { render json: @person, status: :created, location: @person }\n else\n format.js\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person_expense_record = PersonExpenseRecord.new(person_expense_record_params)\n\n if @person_expense_record.save\n render json: @person_expense_record, status: :created, location: @person_expense_record\n else\n render json: @person_expense_record.errors, status: :unprocessable_entity\n end\n end", "def people_params\n params.require(:person).permit(:name, :surname, :mail, :home, :phone, :age, :pilot, :member, :guest, :status, :id_number)\n end", "def create\n @tasks_person = TasksPerson.new(params[:tasks_person])\n\n respond_to do |format|\n if @tasks_person.save\n format.html { redirect_to @tasks_person, notice: 'Tasks person was successfully created.' }\n format.json { render json: @tasks_person, status: :created, location: @tasks_person }\n else\n format.html { render action: \"new\" }\n format.json { render json: @tasks_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def test_post_request_collection\n params = {\n size: 3,\n employmentTypeUris: ['/dk/atira/pure/person/employmenttypes/academic'],\n employmentStatus: 'ACTIVE'\n }\n response = client.persons.all_complex params: params\n assert_equal response.code, 200\n assert_instance_of HTTP::Response, response\n end", "def create\n @counselledperson = Counselledpersons.new(counselledperson_params)\n\n respond_to do |format|\n if @counselledperson.save\n format.html { redirect_to @counselledperson, notice: 'Counselledpersons was successfully created.' }\n format.json { render :show, status: :created, location: @counselledperson }\n else\n format.html { render :new }\n format.json { render json: @counselledperson.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @patient_visit = PatientVisit.new(patient_visit_params)\n\n respond_to do |format|\n if @patient_visit.save\n format.html { redirect_to @patient_visit, notice: 'Patient visit was successfully created.' }\n format.json { render :show, status: :created, location: @patient_visit }\n else\n format.html { render :new }\n format.json { render json: @patient_visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n Rails.logger.info(params)\n # check if all the criterias are filled\n # first I just continue to create iff name, positionl, institution exists\n if params.has_key?(:name) && params.has_key?(:position) && params.has_key?(:institution)\n # check if the person already exists? The person might exists as a mentor of other maybe\n unless Person.exists?(name: params[:name])\n @person = Person.new_person(params[:name], params[:position], params[:institution])\n if @person != nil && @person.save\n render json: @person.as_json, status: :created\n return\n end\n else\n render json: {error: 'person exists'}, status: :bad_request\n end\n end\n #render json: {warning: 'not implemented'}, status: 200\n end", "def create\n @addresses_person = AddressesPerson.new(addresses_person_params)\n\n respond_to do |format|\n if @addresses_person.save\n format.html { redirect_to @addresses_person, notice: 'Addresses person was successfully created.' 
}\n format.json { render :show, status: :created, location: @addresses_person }\n else\n format.html { render :new }\n format.json { render json: @addresses_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def person_params\n params.require(:person).permit(:name, :tmdb_people_id, :biography, :birthday, :deathday, :place_of_birth, :profile_path_url, :gender)\n end", "def create\n\n if !params[:name].nil? && !params[:email].nil?\n user = User.find_by_email(params[:email])\n if user\n pet = user.pets.create(name:params[:name], observations: params[:observations])\n if pet\n render json: pet, status: :created\n else\n render json: {message: 'There was an error saving pet, please try it again'}, status: :bad_request\n end\n else\n render json: {message: 'There was an error saving pet, please try it again'}, status: :bad_request\n end\n else\n render json: {message: 'Pet name not provided'}, status: :bad_request\n end\n end", "def people\n Sifter.\n get(api_people_url).\n fetch(\"people\", []).\n map { |p| Sifter::Person.new(p) }\n end", "def people\n Sifter.\n get(api_people_url).\n fetch(\"people\", []).\n map { |p| Sifter::Person.new(p) }\n end", "def create\n @person = Person.new(person_params)\n @agreement = Agreement.last\n @person.skip_validation = false\n respond_to do |format|\n if @person.save\n @person.agreements << Agreement.last\n if current_user\n if current_user.user?\n current_user.person_id = @person.id\n current_user.save\n end\n end\n format.html { redirect_to @person, notice: 'Uppgifterna lades till. Tack.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def people_params\n params.require(:people).permit(:first_name,:surname, :type_identification, :identification_document, \n :email, :date_birth, :sex, :civil_status, :phone, :cellphone, :address )\n end", "def create\n @contactinfo = Contactinfo.new(params[:contactinfo])\n @contactinfo.user_id = current_user.id\n \n respond_to do |format|\n if @contactinfo.save\n format.html { redirect_to person_path(@contactinfo.people_id), :notice => 'Contactinfo was successfully created.' }\n format.json { render :json => @contactinfo, :status => :created, :location => @contactinfo }\n else\n format.html { render :action => \"new\" }\n format.json { render :json => @contactinfo.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n @visit = @patient.visits.new(visit_params)\n\n respond_to do |format|\n if @visit.save\n format.html { redirect_to [@patient, @visit], notice: 'Visit was successfully created.' }\n format.json { render action: 'show', status: :created, location: @visit }\n else\n format.html { render action: 'new' }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @persona = Persona.new(persona_params)\n\n respond_to do |format|\n if @persona.save\n format.html { redirect_to @persona, notice: 'Datos personales registrados exitosamente.' 
}\n format.json { render :show, status: :created, location: @persona }\n else\n format.html { render :new }\n format.json { render json: @persona.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @visit = Visit.new(params[:visit])\n\n respond_to do |format|\n if @visit.save\n format.html { redirect_to(@visit, :notice => 'Visit was successfully created.') }\n format.xml { render :xml => @visit, :status => :created, :location => @visit }\n format.json { render :json => @visit, :status => :created, :location => @visit, :methods => [:city_state, :unemployment_rate, :match_country_name, :match_country_stat] }\n else\n format.html { render :action => \"new\" }\n format.xml { render :xml => @visit.errors, :status => :unprocessable_entity }\n format.json { render :json => @visit.errors, :status => :unprocessable_entity }\n end\n end\n end", "def create\n # if “Content-Type” header is set as “application/json” then ...\n if request.content_type == \"application/json\" \n # Person.new is like a \"Insert into People values ...\" in SQL\n @person = Person.new(person_params)\n # .save is the commit to database\n if @person.save\n # :created is the http status code 201\n render json: @person, status: :created\n # :bad_request is the http status code 400\n else\n render json: @person.errors, status: :bad_request #:unprocessable_entity\n end\n else\n render status: :bad_request\n end\n end", "def create\n @person = Person.new(person_params)\n @person.department = Department.first\n @person.city = @person.department.cities.first\n\n respond_to do |format|\n if @person.save\n format.html { redirect_to step2_path(@person), notice: 'Person was successfully created.' }\n format.json { render :show, status: :created, location: @person }\n else\n format.html { render :new }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def create\n @person = Person.new( person_params )\n respond_to do |format|\n if @person.save\n format.html { render :edit, notice: t( 'people.msg.new_ok' )}\n else\n format.html { render :new }\n end\n end\n end", "def set_visit_person\n @visit_person = VisitPerson.find(params[:id])\n end" ]
[ "0.69635516", "0.6725972", "0.67029834", "0.6656723", "0.6522561", "0.6466752", "0.6366526", "0.63440424", "0.6340844", "0.6336101", "0.6274313", "0.62431777", "0.62282056", "0.6205908", "0.6204968", "0.6189914", "0.61597675", "0.6142676", "0.61196166", "0.6108959", "0.6100232", "0.60980797", "0.60640705", "0.605394", "0.605394", "0.605394", "0.605394", "0.605394", "0.605394", "0.605394", "0.605394", "0.6031012", "0.6031012", "0.6031012", "0.6031012", "0.6021988", "0.60047215", "0.6004261", "0.5997093", "0.5995039", "0.5994264", "0.5993644", "0.59814507", "0.5943972", "0.5939746", "0.59343225", "0.59219074", "0.5884789", "0.58846664", "0.5884507", "0.58829474", "0.58817554", "0.5871695", "0.58714175", "0.58610713", "0.5844998", "0.5830503", "0.57984984", "0.5796135", "0.57903004", "0.5787641", "0.57846946", "0.5782522", "0.5782099", "0.57813954", "0.5774373", "0.57624525", "0.57613724", "0.5760724", "0.5752275", "0.57521224", "0.57324034", "0.5719457", "0.5717932", "0.5717548", "0.57170624", "0.57125103", "0.57097054", "0.5705067", "0.5702211", "0.56995696", "0.5688504", "0.5670551", "0.5668548", "0.5663108", "0.5662288", "0.5659213", "0.5655736", "0.56362617", "0.56362617", "0.5628996", "0.5622591", "0.5622423", "0.5622341", "0.56183076", "0.56179774", "0.56129235", "0.56128293", "0.56005687", "0.5593925" ]
0.6923559
1
PATCH/PUT /visit_people/1 PATCH/PUT /visit_people/1.json
def update respond_to do |format| if @visit_person.update(visit_person_params) format.html { redirect_to @visit_person, notice: 'Visit person was successfully updated.' } format.json { render :show, status: :ok, location: @visit_person } else format.html { render :edit } format.json { render json: @visit_person.errors, status: :unprocessable_entity } end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update\n if request.content_type == \"application/json\"\n # .update is like a \"update people set ...\" in sql\n if @person.update(person_params)\n render json: @person\n else\n render json: @person.errors, status: :not_found\n end\n else\n render status: :bad_request\n end\n end", "def update\n @person = Person.find(params[:id]) \n respond_to do |format|\n if @person.update(person_params)\n format.json { render json: @person, status: :ok }\n else\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @person.update(person_params)\n render :show, status: :ok, location: api_v1_person_url(@person)\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n end", "def update\n resource_path = \"/projects/#{project_id}/people/#{id}\"\n Request.put(resource_path, self.to_xml('person'))\n end", "def update\n @person = Person.find(params[:id])\n if @person.update_attributes(person_params)\n render :json => @person\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n\n end", "def update\n errors = {}\n if ! ensure_same_as_logged_person(params['user_id'])\n render_json :status => :forbidden and return\n end\n @person = Person.find_by_guid(params['user_id'])\n if ! @person\n render_json :status => :not_found and return\n end\n if params[:person]\n begin\n if @person.json_update_attributes(params[:person])\n render_json :entry => @person.to_hash(@user, @client) and return\n end\n rescue NoMethodError => e\n errors = e.to_s\n end\n end\n\n render_json :status => :bad_request, :messages => @person.errors.full_messages\n @person = nil\n end", "def update\n @person_info = PersonInfo.find(params[:id])\n\n if @person_info.update(person_info_params(params[:person_info]))\n head :no_content\n else\n render json: @person_info.errors, status: :unprocessable_entity\n end\n end", "def update\n @person = Person.find(params[:id])\n\n populate_attributes(@person, params[:person])\n respond_to do |format|\n \n if @person.save && @person.identifiable_entries.each(&:save!)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if @person.seat\n render json: {errors: 'Cannot update a seated person'}, status: 422\n else\n @person.update person_params\n render json: @person\n end\n end", "def update\n if @person.update(person_params)\n render :show, status: :ok, location: api_v2_person_url(@person)\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n end", "def update\n if @person.update(person_params)\n render :show, status: :ok, location: api_v2_person_url(@person)\n else\n render json: @person.errors, status: :unprocessable_entity\n end\n end", "def update\n respond_to do |format|\n if @people.update(people_params)\n format.html { redirect_to root_path(@people), notice: 'VIP ' + @people.name.to_s + ' foi atualizado com sucesso!' 
}\n format.json { render :show, status: :ok, location: @people }\n else\n format.html { render :edit }\n format.json { render json: @people.errors, status: :unprocessable_entity }\n end\n end\n end", "def update_by_body\n @person = Person.find(person_update_params[:id])\n\n if @person.update_attributes(person_update_params)\n render json: { status: 'PUT Success' }, status: :ok\n else\n render json: { status: 'Error', message:'Error updating person', person: @person.errors }, status: :unprocessable_entity\n end\n end", "def update\n @persona = Persona.find(params[:id])\n \n respond_to do |format|\n if @persona.update_attributes(params[:persona])\n format.json { head :ok }\n else\n format.json { render :json => @persona.errors,\n :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to people_path, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person.update_attributes(params[:person])\n respond_with(@person)\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: t('.update_ok') }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @request_person.update(request_person_params)\n format.html { redirect_to @request_person, notice: 'Request person was successfully updated.' }\n format.json { render :show, status: :ok, location: @request_person }\n else\n format.html { render :edit }\n format.json { render json: @request_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n @provider = Provider.find(params[:provider_id]) unless params[:provider_id].blank?\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n\n path = people_path\n msg = 'Person was successfully updated.'\n if @participant\n path = participant_path(@participant, :anchor => \"relationships_tab\")\n msg = 'Person was successfully updated.'\n end\n if @provider\n path = provider_path(@provider)\n msg = \"Person was successfully updated for #{@provider}.\"\n end\n\n format.html { redirect_to(path, :notice => msg) }\n format.json { render :json => @person }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @people.user_updated_id = current_user.id\n respond_to do |format|\n if @people.update(people_params)\n format.html { redirect_to @people, notice: 'Persona actualizada con éxito.' }\n format.json { render :show, status: :ok, location: @people }\n else\n format.html { render :edit }\n format.json { render json: @people.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n track_activity @person\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render layout: 'form', action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @user_person = UserPerson.find(params[:id])\n\n respond_to do |format|\n if @user_person.update_attributes(params[:user_person])\n format.html { redirect_to @user_person, notice: 'User person was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @user_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n\t\tperson = Person.find_by_id(user_params[\"id\"])\n\t\tif person\n\t\t\tperson.favoriteCity = params[\"update\"]\n\t\t\tif person.save\n\t\t\t\trender json: {id: person.id, name: person.name, favoriteCity: person.favoriteCity}\n\t\t\telse\n\t\t\t\trender body: 'Person Invalid', status: 404\n\t\t\tend\n\t\telse\n\t\t\trender body: 'Person Not Found', status: 404\n\t\tend\n\tend", "def update\n @person = Person.find(params[:id]) \n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: \"#{@person.display_name} Person was successfully updated.\" }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n set_associations\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Данные клиента обновлены.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: 'edit' }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: \"#{@person.name} was successfully updated.\" }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n @person.delete_offices\n @person.delete_addresses\n @person.delete_phone_numbers\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @people = People.find(params[:id])\n\n respond_to do |format|\n if @people.update_attributes(params[:people])\n flash[:notice] = 'People was successfully updated.'\n format.html { redirect_to(@people) }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @people.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n if @visit.update_attributes(params[:visit])\n format.json { head :no_content }\n else\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Person was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @politically_exposed_person.update(politically_exposed_person_params)\n format.html { redirect_to politically_exposed_people_url,\n notice: 'Politically exposed person was successfully updated.' }\n format.json { render :show, status: :ok, location: @politically_exposed_person }\n else\n format.html { render :edit }\n format.json { render json: @politically_exposed_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html do\n redirect_to @person, notice: 'Person was successfully updated.'\n end\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json do\n render json: @person.errors, status: :unprocessable_entity\n end\n end\n end\n end", "def update\n @patient = Patient.find(params[:id])\n\n respond_to do |format|\n if @patient.update_attributes(params[:patient].except(:person_attributes))\n format.html { redirect_to people_url, notice: 'Patient was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @patient.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: \"Person was successfully updated.\" }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def edit\n\t\t@person = Person.find_by(id: params[:id])\n\t\t# @person.save\n\t\t# render json: @person #skips the view, and just renders out the json\n\tend", "def update!(**args)\n @people = args[:people] if args.key?(:people)\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.js {}\n format.html { redirect_to people_url, notice: \"Person #{@person.first_name} was successfully updated.\" }\n format.json { render :show, status: :ok, location: @person }\n else\n format.js {}\n format.html { render :edit, status: :unprocessable_entity }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to admin_person_url(@person), notice: 'Person was successfully updated.' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n if params[:person][:company_name]\n params[:person][:company] = Company.find_or_create_by_name(params[:person][:company_name])\n params[:person].delete(:company_name)\n end\n @person = Person.find(params[:id])\n\n authorize! :edit, @person\n \n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, notice: 'Person was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @visit = Visit.find(params[:id])\n\n if @visit.update(visit_params)\n head :no_content\n else\n render json: @visit.errors, status: :unprocessable_entity\n end\n end", "def update\n @tasks_person = TasksPerson.find(params[:id])\n\n respond_to do |format|\n if @tasks_person.update_attributes(params[:tasks_person])\n format.html { redirect_to @tasks_person, notice: 'Tasks person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @tasks_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n\t\trespond_to do |format|\n\t\t\tif @person.update(person_params)\n\t\t\t\tformat.html { redirect_to @person, notice: 'Person was successfully updated.' }\n\t\t\t\tformat.json { render :show, status: :ok, location: @person }\n\t\t\telse\n\t\t\t\tformat.html { render :edit }\n\t\t\t\tformat.json { render json: @person.errors, status: :unprocessable_entity }\n\t\t\tend\n\t\tend\n\tend", "def update\n @people = Person.pluck(:name, :id)\n puts \"*** From update: \" \n\n respond_to do |format|\n if @book.update(book_params)\n format.html { redirect_to @book, notice: 'Book was successfully updated.' }\n format.json { render :show, status: :ok, location: @book }\n else\n format.html { render :edit }\n format.json { render json: @book.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person_interest = PersonInterest.find(params[:id])\n\n respond_to do |format|\n if @person_interest.update_attributes(params[:person_interest])\n format.html { redirect_to @person_interest, notice: 'Person interest was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person_interest.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person.authorizer = current_person\n\n respond_to do |format|\n if @person.update(person_params)\n @person.invite!(current_person) if params['resend_invite']=='true' \n \n format.html { redirect_to :people, notice: 'Profile successfully updated.' 
}\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n\t\t@person = Person.find_by(id: params[:id])\n\t\[email protected]_attributes(person_params)\n\n\t\t# Same as below!\n\t\t# @person.name = params[:person][:name]\n\t\t# @person.age = params[:person][:age]\n\t\t# @person.is_a_clown = params[:person][:is_a_clown]\n\t\t# @person.save\n\n\t\t# redirect_to 'index'\n\t\tredirect_to person_url(@person) # the prefix from rake routes, could also use path..\n\tend", "def update\n\t\t @tree = current_user.trees.find(params[:tree_id])\n\n\t if @tree.nil?\n\t \trender json: { errors: [\"No such tree found\"], success: false }, status: :bad_request\n\t end\n\n\t @person = @tree.people.find(params[:id])\n\n \tif @person.nil?\n \t\trender json: { errors: [\"No such person found\"], success: false }, status: :bad_request\n \tend\n\n \tparams.delete :_id\n\n\t relations = {}\n\t\t \tmodified_people = []\n\n\t\t \tif params[:person][:spouses]\n\t\t \t\trelations[:spouses] = params[:person][:spouses]\n\t\t \t\tparams[:person].delete :spouses\n\t\t \tend\n\n\t\t \tif params[:person][:children]\n\t\t \t\trelations[:children] = params[:person][:children]\n\t\t \t\tparams[:person].delete :children\n\t\t \tend\n\n\t\t \tif params[:person][:parents]\n\t\t \t\trelations[:parents] = params[:person][:parents]\n\t\t \t\tparams[:person].delete :parents\n\t\t \tend\n\n \tif @person.update_attributes(params[:person])\n \t\tmodified_people << @person\n\n \t\trelations.each do |k,v|\n\t\t \t\tv.each do |p|\n\t\t \t\t\tputs \"RELATIONS\", relations\n\t\t \t\t\tputs \"HERE\", k, v, p\n\t\t \t\t\trelative = @tree.people.find(p)\n\t\t \t\t\tputs \"THERE\", relative[k], relative.to_json\n\t\t \t\t\trelative.send(k) << @person\n\t\t \t\t\trelative.save\n\t\t \t\t\tmodified_people << relative\n\t\t \t\tend\n\t\t \tend\n\n \trender json: { person: @person, people: modified_people }, status: :accepted\n \telse\n \trender json: { errors: @person.errors, success: false }, status: :unprocessable_entity\n \tend\n\t\t end", "def update\n respond_to do |format|\n if @team_person.update(team_person_params)\n format.html { redirect_to @team_person, notice: 'Team person was successfully updated.' }\n format.json { render :show, status: :ok, location: @team_person }\n else\n format.html { render :edit }\n format.json { render json: @team_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @projects_person = ProjectsPerson.find(params[:id])\n\n respond_to do |format|\n if @projects_person.update_attributes(params[:projects_person])\n format.html { redirect_to @projects_person, notice: 'Projects person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @projects_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n if current_user\n if current_user.user?\n current_user.person_id = @person.id\n current_user.save\n end\n end\n format.html { redirect_to @person, notice: 'Uppgifterna uppdaterades. 
Tack' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n # @person = Person.find(params[:id])\n # @person.pct_complete = @person.requirement_progress\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to @person, :notice => 'Person was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @users_visit.update(users_visit_params)\n format.html { redirect_to @users_visit, notice: 'Visit was successfully updated.' }\n format.json { render :show, status: :ok, location: @users_visit }\n else\n format.html { render :edit }\n format.json { render json: @users_visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n\n if @person.update_attributes(params[:person])\n redirect_to people_path, notice: @person.name.to_s + ' was successfully updated.'\n else\n render action: \"edit\"\n end\n end", "def update\n @person = Person.find(params[:id])\n @hair_colours = Person.get_hair_colours\n @eye_colours = Person.get_eye_colours\n @heights_feet = Person.get_heights_feet\n @heights_inches = Person.get_heights_inches\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to people_url,\n notice: \"Cast member #{@person.full_name} was successfully updated.\" }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @person.errors,\n status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to @person, notice: 'Cadastro atualizado com sucesso!' }\n format.json { render :show, status: :ok, location: @person }\n else\n format.html { render :edit }\n format.json { render json: @person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @persona = Persona.find(params[:id])\n\n respond_to do |format|\n if @persona.update_attributes(params[:persona])\n format.html { redirect_to personas_path, notice: 'Persona was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @persona.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n render json: Company.update(params[\"id\"], params[\"company\"])\n end", "def update\n @personnage = Personnage.find(params[:id])\n\n respond_to do |format|\n if @personnage.update_attributes(params[:personnage])\n format.html { redirect_to @personnage, notice: 'Personnage was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @personnage.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @visit.update(visit_params)\n format.html { redirect_to @visit, notice: 'Visit was successfully updated.' 
}\n format.json { render :show, status: :ok, location: @visit }\n else\n format.html { render :edit }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @visit.update(visit_params)\n format.html { redirect_to @visit, notice: 'Visit was successfully updated.' }\n format.json { render :show, status: :ok, location: @visit }\n else\n format.html { render :edit }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n if @visit.update_attributes(params[:visit])\n format.html { redirect_to @visit, notice: 'Visit was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n if @visit.update_attributes(params[:visit])\n format.html { redirect_to @visit, notice: 'Visit was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def UpdateView params = {}\n \n APICall(path: 'views.json',method: 'PUT',payload: params.to_json)\n \n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to(@person, :notice => 'Person was successfully updated.') }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @organization_person = OrganizationPerson.find(params[:id])\n\n if @organization_person.update(organization_person_params)\n render json: @organization_person\n else\n render json: @organization_person.errors, status: :unprocessable_entity\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n @person.gender = nil if @person.gender.empty?\n @person.gravatar_code = nil if @person.gravatar_code.empty?\n @person.save\n format.html { redirect_to(@person, :notice => 'Person was successfully updated.') }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n\n\t\trespond_to do |format|\n\t\t\tif @person.update_attributes(params[:person])\n\t\t\t\tformat.html { redirect_to(@person, :notice => 'Person was successfully updated.') }\n\t\t\t\tformat.xml { head :ok }\n\t\t\telse\n\t\t\t\tformat.html { render :action => \"edit\" }\n\t\t\t\tformat.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n\t\t\tend\n\t\tend\n\tend", "def update\n respond_to do |format|\n if @person.update(person_params)\n format.html { redirect_to contestants_path, notice: 'Contestant was successfully updated.' 
}\n else\n format.html { render :edit }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n flash[:notice] = 'Person was successfully updated.'\n format.html { redirect_to people_url }\n format.xml { head :ok }\n else\n flash[:warning] = 'Please check every information that you are entering and fill the required fields.'\n format.html { render :edit }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @person.update_attributes(params[:person])\n format.html { redirect_to(@person, :notice => 'Person was successfully updated.') }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @person = Person.find(params[:id])\n\n respond_to do |format|\n if @person.update_attributes(params[:person])\n flash[:notice] = 'Person was successfully updated.'\n format.html { redirect_to(person_path) }\n format.xml { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.xml { render :xml => @person.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n respond_to do |format|\n if @visit_request.update(visit_request_params)\n format.html { redirect_to @visit_request, notice: 'Visit request was successfully updated.' }\n format.json { render :show, status: :ok, location: @visit_request }\n else\n format.html { render :edit }\n format.json { render json: @visit_request.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @visit = Visit.find(params[:id])\n\n respond_to do |format|\n if @visit.update_attributes(params[:visit])\n format.html { redirect_to patient_path(params[:patient_id]), notice: 'Visit was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @visit.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @missing_person = MissingPerson.find(params[:id])\n\n respond_to do |format|\n if @missing_person.update_attributes(params[:missing_person])\n format.html { redirect_to @missing_person, notice: 'Missing person was successfully updated.' 
}\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @missing_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n #Finding the specific chore where the id matches the one we pass in with the body\n @v1_chore = Chore.where(id: params[:id]).first\n #Here we're checking if we have user_id in our body, and if we do, we'll change the selected chore's properties\n #with the parameters of the body, we go through the specific group to our specific chore with the path\n if v1_chore_params[:user_id]\n @v1_chore.user_id = params[:user_id]\n @v1_chore.assigned = true\n if @v1_chore.save\n render :show, status: :ok\n end\n else\n render json: @v1_chore.errors, status: :unprocessable_entity\n end\n end", "def update\n flash[:notice] = \"Person was successfully updated.\" if @person.update(person_params)\n respond_with(@person)\n end", "def update\n @person = Roxiware::Person.find(params[:id])\n _create_or_update(@person)\n end", "def update\n @contactinfo = Contactinfo.find(params[:id])\n\n respond_to do |format|\n if @contactinfo.update_attributes(params[:contactinfo])\n format.html { redirect_to person_path(@contactinfo.people_id), :notice => 'Contactinfo was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render :action => \"edit\" }\n format.json { render :json => @contactinfo.errors, :status => :unprocessable_entity }\n end\n end\n end", "def update\n @key_person = KeyPerson.find(params[:id])\n\n respond_to do |format|\n if @key_person.update_attributes(params[:key_person])\n format.html { redirect_to @key_person, notice: 'Key person was successfully updated.' }\n format.json { head :ok }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @key_person.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @person_type_masters = PersonTypeMaster.all\n @user = User.find_by_person_id(params[:id])\n logger.info \"The selected user -s #{@user.inspect}\"\n respond_to do |format|\n if @person_info.update(person_info_params)\n format.js { flash.now[:notice] = \"Personnel was successfully updated.\" }\n format.html { redirect_to current_user, notice: 'Person info was successfully updated.' }\n format.json { render :show, status: :ok, location: @person_info }\n else\n @person_type_masters = PersonTypeMaster.all\n format.js {render :edit}\n format.html { render :edit }\n format.json { render json: @person_info.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n @personaje = Personaje.find(params[:id])\n\n respond_to do |format|\n if @personaje.update_attributes(params[:personaje])\n format.html { redirect_to @personaje, notice: 'Personaje was successfully updated.' }\n format.json { head :no_content }\n else\n format.html { render action: \"edit\" }\n format.json { render json: @personaje.errors, status: :unprocessable_entity }\n end\n end\n end", "def update\n render json: User.update(params[\"id\"], params[\"user\"])\n end" ]
[ "0.6928483", "0.68815166", "0.67492306", "0.6742536", "0.67113584", "0.66931224", "0.6684623", "0.66673857", "0.66655344", "0.66316587", "0.66316587", "0.6526323", "0.6523786", "0.65052253", "0.649264", "0.6435726", "0.6435726", "0.64292026", "0.64284277", "0.64284277", "0.64284277", "0.64284277", "0.64284277", "0.6425117", "0.6417238", "0.6406564", "0.64053893", "0.6399509", "0.63836193", "0.6382508", "0.63679475", "0.63552505", "0.6342979", "0.633835", "0.633835", "0.633835", "0.63335896", "0.6315465", "0.63074464", "0.629274", "0.62884724", "0.62884724", "0.62884724", "0.62884724", "0.62884724", "0.62884724", "0.62884724", "0.62884724", "0.62822425", "0.62798285", "0.62793916", "0.62612075", "0.62349313", "0.6233694", "0.6214414", "0.6186367", "0.6176538", "0.61753803", "0.61696", "0.61686003", "0.6165248", "0.6153191", "0.6147366", "0.6146472", "0.61291635", "0.6090206", "0.6086152", "0.6080999", "0.6075891", "0.6073683", "0.60705215", "0.6050695", "0.6048796", "0.6047658", "0.60370994", "0.60314846", "0.6031415", "0.6031415", "0.6024813", "0.6024813", "0.60175353", "0.60117865", "0.6004867", "0.60016847", "0.59977865", "0.59851795", "0.5976972", "0.5973045", "0.5967874", "0.59604627", "0.59599507", "0.5953937", "0.594976", "0.5949542", "0.5949057", "0.59448415", "0.5940363", "0.59290206", "0.59280956", "0.5924342" ]
0.694742
0
DELETE /visit_people/1 DELETE /visit_people/1.json
def destroy @visit_person.destroy respond_to do |format| format.html { redirect_to visit_people_url, notice: 'Visit person was successfully destroyed.' } format.json { head :no_content } end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy\n @visit = Visit.find(params[:id])\n @visit.destroy\n\n respond_to do |format|\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n track_activity @person\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n # @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n respond_to do |format|\n format.json { render json: {}, status: :ok }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit.destroy\n respond_to do |format|\n format.html { redirect_to patient_visits_url(@patient) }\n format.json { head :no_content }\n end\n end", "def destroy\n @user_person = UserPerson.find(params[:id])\n @user_person.destroy\n\n respond_to do |format|\n format.html { redirect_to user_people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n ## note use of method chaining to combine find & destroy into one line\n Person.find(params[:id]).destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit = Visit.find(params[:id])\n @visit.destroy\n\n respond_to do |format|\n format.html { redirect_to visits_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @users_visit.destroy\n respond_to do |format|\n format.html 
{ redirect_to users_visits_url, notice: 'Visit was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n id = @person.id\n\n if @person.destroy\n render json: { status: 'DELETE Success' }, status: :ok\n else\n render json: { status: 'Error', message:'Error deleting person', person: @person.errors }, status: :unprocessable_entity\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to root_path }\n format.json { head :no_content }\n end\n end", "def destroy\n @person_info = PersonInfo.find(params[:id])\n @person_info.destroy\n\n head :no_content\n end", "def destroy\n @persona = Persona.find(params[:id])\n @persona.destroy\n\n respond_to do |format|\n format.json { head :ok }\n end\n \n end", "def destroy\n @person = people_type.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to :action => \"index\" }\n format.json { head :ok }\n end\n end", "def destroy\n @person.destroy\n head :no_content\n end", "def destroy\n @person.destroy\n head :no_content\n end", "def destroy\n @visit.destroy\n respond_to do |format|\n format.html { redirect_to visits_url, notice: 'Visit was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit = Visit.find(params[:id])\n @visit.destroy\n respond_to do |format|\n format.html { redirect_to patient_visits_path(params[:patient_id]) }\n format.json { head :no_content }\n end\n end", "def destroy\n @people.destroy\n respond_to do |format|\n format.html { redirect_to root_path, notice: 'Um VIP ' + @people.name.to_s + ' foi deletado com sucesso!' }\n format.json { head :no_content }\n end\n end", "def destroy\n @people.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Persona eliminada con éxito.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.user.destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit = Visit.find(params[:id])\n @visit.destroy\n\n respond_to do |format|\n format.html { redirect_to patient_path (params[:patient_id]) }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to admin_people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit.destroy\n\n head :no_content\n end", "def destroy_by_body\n @person = Person.find(person_delete_param[:id])\n id = @person.id\n\n if @person.destroy\n render json: { status: 'DELETE Success' }, status: :ok\n else\n render json: { status: 'Error', message:'Error deleting person', person: @person.errors }, status: :unprocessable_entity\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Клиент удален из базы.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit.destroy\n respond_to do |format|\n format.html { redirect_to visits_url, notice: 'Visita eliminada con exito' }\n format.json { head :no_content }\n end\n end", "def delete\n @person = Person.find_by_guid(params['user_id'])\n if ! @person\n render_json :status => :not_found and return\n end\n if ! 
ensure_same_as_logged_person(params['user_id'])\n render_json :status => :forbidden and return\n end\n @person.destroy\n @application_session.destroy\n session[:cos_session_id] = nil\n render_json :status => :ok\n end", "def destroy\n @contactinfo = Contactinfo.find(params[:id])\n @people_id = @contactinfo.people_id\n @contactinfo.destroy\n\n respond_to do |format|\n format.html { redirect_to person_path(@people_id) }\n format.json { head :ok }\n end\n end", "def destroy\n @postgresql_view_person.destroy\n respond_to do |format|\n format.html { redirect_to postgresql_view_people_url, notice: 'Postgresql view person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Uppgifterna om personen raderades.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @personnage = Personnage.find(params[:id])\n @personnage.destroy\n\n respond_to do |format|\n format.html { redirect_to personnages_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit.destroy\n respond_to do |format|\n format.html { redirect_to visits_path, falsh: { success: 'Visit was successfully destroyed.' } }\n format.json { render json: {\"data\" => []}}\n end\n end", "def destroy\n @emu_person.destroy\n respond_to do |format|\n format.html { redirect_to emu_people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: \"#{@person.name} was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: \"Person was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @request_person.destroy\n respond_to do |format|\n format.html { redirect_to request_people_url, notice: 'Request person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @visit_request.destroy\n respond_to do |format|\n format.html { redirect_to visit_requests_url, notice: 'Visit request was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: \"Person was successfully destroyed\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @patient_visit.destroy\n respond_to do |format|\n format.html { redirect_to patient_visits_url, notice: 'Patient visit was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @persona = Persona.find(params[:id])\n @persona.destroy\n\n respond_to do |format|\n format.html { redirect_to personas_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @persona = Persona.find(params[:id])\n @persona.destroy\n\n respond_to do |format|\n format.html { redirect_to personas_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @persona = Persona.find(params[:id])\n @persona.destroy\n\n respond_to do |format|\n format.html { redirect_to personas_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n authorize! :destroy, @person\n \n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html do\n redirect_to people_url(page: index_page),\n notice: 'Person was successfully destroyed.'\n end\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url, notice: \"Person #{@person.first_name} was successfully destroyed.\" }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n redirect_to people_url\n end", "def destroy\n @visitation = Visitation.find(params[:id])\n @visitation.destroy\n\n respond_to do |format|\n format.html { redirect_to visitations_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy if @person.users.empty?\n \n respond_to do |format|\n format.html { redirect_to(people_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @guest_visit.destroy\n respond_to do |format|\n format.html { redirect_to guest_visits_url, notice: 'Guest visit was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @key_person = KeyPerson.find(params[:id])\n @key_person.destroy\n\n respond_to do |format|\n format.html { redirect_to key_people_url }\n format.json { head :ok }\n end\n end", "def delete\n render json: User.delete(params[\"id\"])\n end", "def destroy\n @person_interest = PersonInterest.find(params[:id])\n @person_interest.destroy\n\n respond_to do |format|\n format.html { redirect_to person_interests_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @personaje = Personaje.find(params[:id])\n @personaje.destroy\n\n respond_to do |format|\n format.html { redirect_to personajes_url }\n format.json { head :no_content }\n end\n end", "def delete\n render json: Users.delete(params[\"id\"])\n end", "def destroy\n @person.skills.delete_all\n \n @person.destroy\n respond_to do |format|\n format.html { redirect_to people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @person = Person.find(params[:id])\n @person.destroy\n\n respond_to do |format|\n format.html { redirect_to people_url }\n format.js\n format.json { head :no_content }\n end\n end", "def destroy\n @person.destroy\n respond_to do |format|\n format.html { redirect_to admin_people_url, notice: 'Person was successfully destroyed.' }\n format.json { head :no_content }\n end\n end", "def destroy\n @person_info.destroy\n respond_to do |format|\n format.html { redirect_to person_infos_url, notice: 'Person info was successfully destroyed.' 
}\n format.json { head :no_content }\n end\n end", "def destroy\n @projects_person = ProjectsPerson.find(params[:id])\n @projects_person.destroy\n\n respond_to do |format|\n format.html { redirect_to projects_people_url }\n format.json { head :no_content }\n end\n end", "def destroy\n @people = People.find(params[:id])\n @people.destroy\n\n respond_to do |format|\n format.html { redirect_to(peoples_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n @animal.destroy\n respond_to do |format|\n format.html { redirect(person_animals_url, :delete, 'animal') }\n format.json { head :no_content }\n end\n end", "def delete_tenant_circle(args = {}) \n delete(\"/tenantcircles.json/#{args[:circleId]}\", args)\nend", "def destroy\n @visit = Visit.find(params[:id])\n @visit.destroy\n\n respond_to do |format|\n format.html { redirect_to(visits_url) }\n format.xml { head :ok }\n end\n end", "def destroy\n visit.destroy\n respond_with(visit)\n end", "def destroy\n @type_person = TypePerson.find(params[:id])\n @type_person.destroy\n\n respond_to do |format|\n format.html { redirect_to type_people_url }\n format.json { head :no_content }\n end\n end", "def delete\n Person.find(params[:id]).destroy\n redirect_to action: 'list'\n end" ]
[ "0.7289311", "0.7275337", "0.7267954", "0.7251916", "0.7251916", "0.7251916", "0.72199297", "0.72199297", "0.72199297", "0.72199297", "0.72086364", "0.7166149", "0.7166149", "0.7166149", "0.7166149", "0.7166149", "0.7136831", "0.7117537", "0.70828074", "0.70825285", "0.70672446", "0.7065165", "0.70486903", "0.7048105", "0.70436907", "0.7022865", "0.70134044", "0.70134044", "0.6984697", "0.69786245", "0.6973955", "0.697388", "0.6955903", "0.6955325", "0.69522494", "0.69462866", "0.69316584", "0.69290566", "0.6923212", "0.6906731", "0.68727595", "0.68645203", "0.6860714", "0.6858298", "0.68504095", "0.68490124", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.68477446", "0.6847313", "0.6846957", "0.68463475", "0.6845685", "0.6843209", "0.6838388", "0.6838388", "0.6838388", "0.6838388", "0.6838388", "0.6838388", "0.6838388", "0.6838388", "0.6832067", "0.6830211", "0.6830211", "0.6828462", "0.6826917", "0.6826917", "0.6826917", "0.68201387", "0.6819323", "0.6813619", "0.68130934", "0.6812166", "0.68106735", "0.67943126", "0.67832917", "0.6767694", "0.67646897", "0.6746523", "0.6738463", "0.67301255", "0.67185295", "0.6712824", "0.6710416", "0.67095095", "0.6704128", "0.6703753", "0.6701032", "0.67003304", "0.6698311", "0.6672737", "0.6669267" ]
0.7716077
0
Use callbacks to share common setup or constraints between actions.
def set_visit_person @visit_person = VisitPerson.find(params[:id]) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_required_actions\n # TODO: check what fields change to asign required fields\n end", "def action_hook; end", "def run_actions; end", "def define_action_hook; end", "def actions; end", "def define_action_helpers\n if super && action == :save\n @instance_helper_module.class_eval do\n define_method(:valid?) do |*args|\n self.class.state_machines.fire_event_attributes(self, :save, false) { super(*args) }\n end\n end\n end\n end", "def add_actions; end", "def callbacks; end", "def callbacks; end", "def setup *actions, &proc\n (@setup_procs ||= []) << [proc, actions.size > 0 ? actions : [:*]]\n end", "def define_action_helpers; end", "def post_setup\n end", "def action_methods; end", "def action_methods; end", "def action_methods; end", "def before_setup; end", "def action_run\n end", "def execute(setup)\n @action.call(setup)\n end", "def define_action_helpers?; end", "def set_actions\n actions :all\n end", "def action_done(action)\n dispatch = { :migrate => :done_migrating, :map => :done_mapping, :reduce =>\n :done_reducing, :finalize => :done_finalizing } \n self.send dispatch[action[:action]], action\n end", "def dependencies action, &block\n @actions.each do |other|\n if action[:requires].include? other[:provide]\n block.call other\n end\n end\n end", "def setup!\n return unless @setup_procs\n http_actions = actions\n @setup_procs.each do |setup_proc|\n proc, actions = setup_proc\n @setup__actions = actions.map do |action|\n\n action.is_a?(Regexp) ?\n http_actions.select { |a| a.to_s =~ action } :\n action.is_a?(String) && action =~ /\\A\\./ ?\n http_actions.map { |a| a.to_s << action if format?(a).include?(action) }.compact :\n action\n\n end.flatten\n self.class_exec &proc\n @setup__actions = nil\n end\n @setup_procs = nil\n end", "def before_actions(*logic)\n self.before_actions = logic\n end", "def setup_handler\n end", "def set_action(opts)\n opts = check_params(opts,[:actions])\n super(opts)\n end", "def setup(action)\n @targets.clear\n unless action.item.target_filters.empty?\n @targets = SES::TargetManager.make_targets(action)\n else\n item = action.item\n if item.for_opponent?\n @targets = $game_troop.alive_members\n elsif item.for_dead_friend?\n @targets = $game_party.battle_members.select { |actor| actor.dead? }\n else\n $game_party.battle_members.select { |actor| actor.alive? 
}\n end\n end\n @item_max = @targets.size\n create_contents\n refresh\n show\n activate\n end", "def action; end", "def action; end", "def action; end", "def action; end", "def action; end", "def workflow\n end", "def revisable_shared_setup(args, block)\n class << self\n attr_accessor :revisable_options\n end\n options = args.extract_options!\n self.revisable_options = Options.new(options, &block)\n \n self.send(:include, Common)\n self.send(:extend, Validations) unless self.revisable_options.no_validation_scoping?\n self.send(:include, WithoutScope::QuotedColumnConditions)\n end", "def setup\n @action = SampleActionAndroid.new(os_name: 'android',\n app_name: APP_PATH)\n end", "def before(action)\n invoke_callbacks *self.class.send(action).before\n end", "def process_action(...)\n send_action(...)\n end", "def before_dispatch(env); end", "def after_actions(*logic)\n self.after_actions = logic\n end", "def setup\n # override and do something appropriate\n end", "def setup(client)\n return unless @setup\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n actions.each do |action|\n action.execute(client)\n end\n self\n end", "def setup(_context)\n end", "def setup(resources) ; end", "def validate_actions\n errors.add(:base, :should_give_at_least_one_action) if !manage? && !forecasting? && !read? && !api?\n end", "def setup\n @resource_config = {\n :callbacks => {\n :before_create => nil,\n :after_create => nil,\n :before_update => nil,\n :after_update => nil,\n :before_destroy => nil,\n :after_destroy => nil,\n },\n :child_assoc => nil,\n :model => nil,\n :parent => nil,\n :path => nil,\n :permission => {},\n :properties => {},\n :relation => {\n :create => nil,\n :delete => nil,\n },\n :roles => nil,\n }\n end", "def determine_valid_action\n\n end", "def process_shared\n handle_taxes\n handle_shippings\n create_adjustments_from_params\n handle_status\n handle_inventory_refunds\n handle_payment_transactions\n order.updater.update\n end", "def startcompany(action)\n @done = true\n action.setup\n end", "def init_actions\n am = action_manager()\n am.add_action(Action.new(\"&Disable selection\") { @selection_mode = :none; unbind_key(32); bind_key(32, :scroll_forward); } )\n am.add_action(Action.new(\"&Edit Toggle\") { @edit_toggle = !@edit_toggle; $status_message.value = \"Edit toggle is #{@edit_toggle}\" })\n end", "def event_callbacks(event, metadata={})\n case event\n when :reset, :review\n if confirmed\n update_attributes(confirmed: false)\n end\n when :confirm\n confirm\n # trigger :order for all applicable items\n # NOTE: :order event is common to both physical and digital items\n items.each do |i|\n if i.event_permitted(:order)\n user_id = last_transition.user_id\n i.trigger!(:order, { order_id: id, user_id: user_id })\n end\n end\n when :complete_work\n request = metadata[:request]\n work_complete_notification(request)\n when :close\n close\n end\n if event != :close && !open\n reopen\n end\n end", "def setup_action\n return unless PONY::ERRNO::check_sequence(current_act)\n new_sequence = @action_sequence[@sequence_index+1...@action_sequence.size]\n @sequence_index = 0\n new_sequence = DND::SkillSequence::ACTS[@acts[1]] + new_sequence\n execute_sequence\n end", "def define_tasks\n define_weave_task\n connect_common_tasks\n end", "def setup(&block)\n define_method(:setup, &block)\n end", "def setup\n transition_to(:setup)\n end", "def setup\n transition_to(:setup)\n end", "def action\n end", "def setup( *args 
)\n\t\t\tself.class.setupBlocks.each {|sblock|\n\t\t\t\tdebugMsg \"Calling setup block method #{sblock}\"\n\t\t\t\tself.send( sblock )\n\t\t\t}\n\t\t\tsuper( *args )\n\t\tend", "def config(action, *args); end", "def setup\n @setup_proc.call(self) if @setup_proc\n end", "def before_action \n end", "def setup_callbacks\n defined_callbacks.each do |meth|\n unless respond_to?(\"call_#{meth}_callbacks\".to_sym)\n self.class.module_eval <<-EOE\n def call_#{meth}_callbacks(*args)\n plugin_store.each {|a| a.call_#{meth}_callbacks(*args) } if respond_to?(:plugin_store) && plugin_store\n self.send :#{meth}, *args if respond_to?(:#{meth})\n end\n EOE\n end\n end\n end", "def action\n end", "def matt_custom_action_begin(label); end", "def setup\n # override this if needed\n end", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def setup\n\t\t\t\t\t\t# Do nothing\n\t\t\t\tend", "def action(options,&callback)\n new_action = Action===options ? options : Action.new(options,&callback)\n # replace any with (shared name/alias or both default) + same arity\n @actions.delete_if do |existing_action|\n ((existing_action.names & new_action.names).size > 0 ||\n existing_action.default? && new_action.default?) &&\n existing_action.required.size == new_action.required.size &&\n existing_action.optional.size <= new_action.optional.size\n end\n @actions = (@actions + [new_action]).sort\n new_action\n end", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action\n end", "def after(action)\n invoke_callbacks *options_for(action).after\n end", "def pre_task\n end", "def setup(server)\n server.on('beforeMethod', method(:before_method), 10)\n end", "def add_actions\n attribute = machine.attribute\n name = self.name\n \n owner_class.class_eval do\n define_method(name) {self.class.state_machines[attribute].events[name].fire(self)}\n define_method(\"#{name}!\") {self.class.state_machines[attribute].events[name].fire!(self)}\n define_method(\"can_#{name}?\") {self.class.state_machines[attribute].events[name].can_fire?(self)}\n end\n end", "def init_actions\n @select_action = SelectAction.new\n @endpoint_mouse_action = EndpointMouseAction.new\n @move_action = MoveAction.new\n end", "def setup_signals; end", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def after_created\r\n return unless compile_time\r\n Array(action).each do |action|\r\n run_action(action)\r\n end\r\nend", "def set_target_and_action target, action\n self.target = target\n self.action = 'sugarcube_handle_action:'\n @sugarcube_action = action.respond_to?('weak!') ? action.weak! 
: action\n end", "def initialize(*args)\n super\n @action = :set\nend", "def after_set_callback; end", "def setup\n #implement in subclass;\n end", "def lookup_action; end", "def setup &block\n if block_given?\n @setup = block\n else\n @setup.call\n end\n end", "def setup_action\n return TSBS.error(@acts[0], 1, @used_sequence) if @acts.size < 2\n actions = TSBS::AnimLoop[@acts[1]]\n if actions.nil?\n show_action_error(@acts[1])\n end\n @sequence_stack.push(@acts[1])\n @used_sequence = @acts[1]\n actions.each do |acts|\n @acts = acts\n execute_sequence\n break if @break_action\n end\n @sequence_stack.pop\n @used_sequence = @sequence_stack[-1]\n end", "def release_actions; end", "def around_hooks; end", "def save_action; end", "def setup(easy)\n super\n easy.customrequest = @verb\n end", "def action_target()\n \n end", "def setup\n callback(:setup) do\n notify(:setup)\n migration_check.last_deployed_commit\n end\n end", "def setup\n return unless @setup\n\n actions = @setup['setup'].select { |action| action['do'] }.map { |action| Action.new(action['do']) }\n run_actions_and_retry(actions)\n self\n end", "def before_setup\n # do nothing by default\n end", "def my_actions(options)\n @setup = false\n get_template_part(\"custom_used\",\"action_users\",true)\n end", "def default_action; end", "def setup(&blk)\n @setup_block = blk\n end", "def callback_phase\n super\n end", "def advice\n end", "def _handle_action_missing(*args); end", "def duas1(action)\n action.call\n action.call\nend", "def shared_action(name, &block)\n @controller.shared_actions[name] = block\n end", "def before_action action, &block\n @audience[:before][action] ||= Set.new\n @audience[:before][action] << block\n end", "def setup_initial_state\n\n state_a = State.new(\"a\", 0)\n state_b = State.new(\"b\", 0)\n state_c = State.new(\"c\", 10)\n\n move_to_b = Action.new(\"move_to_b\", 1, state_b)\n\n move_to_c = Action.new(\"move_to_c\", 1, state_c)\n\n state_a.actions = [move_to_b, move_to_c]\n\n return state_a\n \nend" ]
[ "0.6163163", "0.6045976", "0.5946146", "0.591683", "0.5890051", "0.58349305", "0.5776858", "0.5703237", "0.5703237", "0.5652805", "0.5621621", "0.54210985", "0.5411113", "0.5411113", "0.5411113", "0.5391541", "0.53794575", "0.5357573", "0.53402257", "0.53394014", "0.53321576", "0.53124547", "0.529654", "0.5296262", "0.52952296", "0.52600986", "0.52442724", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.52385926", "0.5232394", "0.523231", "0.5227454", "0.52226824", "0.52201617", "0.5212327", "0.52079266", "0.52050185", "0.51754695", "0.51726824", "0.51710224", "0.5166172", "0.5159343", "0.51578903", "0.51522785", "0.5152022", "0.51518047", "0.51456624", "0.51398855", "0.5133759", "0.5112076", "0.5111866", "0.5111866", "0.5110294", "0.5106169", "0.509231", "0.50873137", "0.5081088", "0.508059", "0.50677156", "0.50562143", "0.5050554", "0.50474834", "0.50474834", "0.5036181", "0.5026331", "0.5022976", "0.5015441", "0.50121695", "0.5000944", "0.5000019", "0.4996878", "0.4989888", "0.4989888", "0.49864885", "0.49797225", "0.49785787", "0.4976161", "0.49683493", "0.4965126", "0.4958034", "0.49559742", "0.4954353", "0.49535993", "0.4952725", "0.49467874", "0.49423352", "0.49325448", "0.49282882", "0.49269363", "0.49269104", "0.49252945", "0.4923091", "0.49194667", "0.49174926", "0.49173003", "0.49171105", "0.4915879", "0.49155936" ]
0.0
-1
Never trust parameters from the scary internet, only allow the white list through.
def visit_person_params
  params.require(:visit_person).permit(:visit_id, :person_id)
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_params\n params.require(:user).permit(param_whitelist)\n end", "def strong_params\n params.require(:listing_member).permit(param_whitelist)\n end", "def allow_params_authentication!; end", "def allowed_params\n ALLOWED_PARAMS\n end", "def default_param_whitelist\n [\"mode\"]\n end", "def param_whitelist\n [:role, :title]\n end", "def expected_permitted_parameter_names; end", "def safe_params\n params.except(:host, :port, :protocol).permit!\n end", "def strong_params\n params.require(:team_member).permit(param_whitelist)\n end", "def permitir_parametros\n \t\tparams.permit!\n \tend", "def strong_params\n params.require(:community).permit(param_whitelist)\n end", "def permitted_strong_parameters\n :all #or an array of parameters, example: [:name, :email]\n end", "def strong_params\n params.require(:education).permit(param_whitelist)\n end", "def restricted_params\n #params.require(self.controller_name.classify.underscore.to_sym).permit([])\n raise(\"No strong params set, override restricted_params method in your controller. E.g. params.require(:model).permit(:attribute1, :attribute2)\")\n end", "def allowed_params\n params.require(:user).permit(:username, :email, :password, :password_confirmation)\n end", "def param_whitelist\n [:rating, :review]\n end", "def param_whitelist\n whitelist = [\n :username, :name,\n :parent_id,\n :headline, :description, :video,\n :policy, :signup_mode, :category,\n :website, :facebook, :twitter, :linkedin,\n :founded_at,\n privacy: [\n :events,\n :resources\n ],\n permission: [\n :profile,\n :members,\n :children,\n :statistics,\n :posts,\n :listings,\n :resources,\n :events\n ],\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:parent_id)\n unless current_user.role_in(@community) === 'owner'\n whitelist.delete(:privacy)\n whitelist.delete(:permission)\n end\n end\n \n whitelist\n end", "def param_whitelist\n if @user.present? && current_user != @user\n return [:followed]\n end\n \n whitelist = [\n :username, :email, :password,\n :first_name, :last_name,\n :birthday, :gender,\n :headline, :biography, :ask_about, :focus,\n :website, :facebook, :linkedin, :twitter, :github,\n roles: [],\n skills: [],\n interests: [],\n privacy: { contact: [] },\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n \n if action_name === 'update'\n whitelist.delete(:email)\n whitelist.delete(:password)\n end\n \n whitelist\n end", "def user_params \n \tparams.require(:user).permit(:name, :email, :password, :password_confirmation)# preventing CSTR\n end", "def user_params\n params.permit(:name, :phoneNumber, :address, :postalCode, :local, :link, :counter, :latitude, :longitude) \n end", "def valid_params_request?; end", "def strong_params\n params.require(:experience).permit(param_whitelist)\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? 
key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def whitelist_url_params\n params.require(:whitelist_url).permit(:domain)\n end", "def allowed_params\n params.require(:allowed).permit(:email)\n end", "def permitted_params\n []\n end", "def trim_whitelisted(params, whitelist)\n # remove any parameters that are not whitelisted\n params.each do |key, value|\n # if white listed\n if whitelist.include? key\n # strip the parameters of any extra spaces, save as string\n params[key] = value.to_s.strip\n else\n # delete any unauthorized parameters\n params.delete key\n end\n end\n params\n end", "def safe_params\n params.permit(:id, :name, :origin, :emails => []); #emails is an array\n end", "def query_param\n\t\tparams.permit(:first_name, :last_name, :phone)\n\tend", "def strong_params\n params.require(:success_metric).permit(param_whitelist)\n end", "def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end", "def whitelisted_user_params\n params.require(:user).\n permit( :first_name, :last_name, :email,:password,:password_confirmation,:birthday,:gender)\n end", "def user_params\n ActionController::Parameters.permit_all_parameters = true\n params.require(:user) #.permit(:name, :surname, :phone, :password, :email, :time_zone)\n end", "def strong_params\n params.require(:metric_change).permit(param_whitelist)\n end", "def safe_params\n params.require(:user).permit(:name)\n end", "def get_params\n\t\treturn ActionController::Parameters.new(self.attributes).permit(\"account_id\", \"title\", \"category\", \"introduction\", \"tags\", \"segment_type\", \"visible\", \"status\", \"main_image\")\n\tend", "def grant_params\n @whitelisted = params.require(:grant).permit(:name, :description, :agency_id, :acronym)\n end", "def check_params; true; end", "def param_whitelist\n whitelist = [\n :description,\n :progress,\n :kpi_id\n ]\n \n unless action_name === 'create'\n whitelist.delete(:kpi_id)\n end\n \n whitelist\n end", "def quote_params\n params.permit!\n end", "def valid_params?; end", "def paramunold_params\n params.require(:paramunold).permit!\n end", "def user_params\n\t\tparams.permit(:nickname, :avatar, :description, :password, :gender, :birthday, :email, :phone, :qq_id, :wechat_id)\n\tend", "def filtered_parameters; end", "def user_params\n params.permit(\n \t:id,\n \t:email, \n \t:first_name, \n \t:last_name, \n \t:password, \n \t:confirm_token, \n \t:phone_number,\n \t:facebook_link,\n \t:car_model,\n \t:license_plate)\n end", "def filtering_params\n params.permit(:email, :name)\n end", "def check_params\n true\n end", "def wx_public_params\n params.require(:wx_public).permit(:nickname, :manager, :alias)\n end", "def allowed_params\n params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def allowed_params\n 
params.require(:user).permit(:email, :password, :role, :first_name, :last_name, :password_confirmation)\n end", "def listing_params\n\t\tparams.permit(:address, :transit_info, :rules, :other_info, :lat, :lng)\n\tend", "def social_account_params\n\t\t\tparams.require(:social_account).permit!\n\t\tend", "def safe_params\n resurce_name = self.class.resource_name\n params_method_name = \"#{resurce_name}_params\".to_sym\n if params[resurce_name]\n if respond_to?(params_method_name) || private_methods.include?(params_method_name)\n send(params_method_name)\n else\n raise ActiveModel::ForbiddenAttributesError, \"Please, define the '#{params_method_name}' method in #{self.class.name}\"\n end\n end\n end", "def url_params\n params.require(:url).permit(:short_url, :original_url, :clicks, :ip_addresses)\n end", "def user_params\n params.require(:user).permit(:uri, :username, :password, :realname, :email, :publicvisible)\n end", "def model_params\n\t\tparams.require(:manager).permit(\n\t :user_name,\n :password,\n :email,\n \t\t\t)\n\tend", "def article_params_whitelist\n params.require(:article).permit(:title, :description, category_ids: [])\n end", "def college_whitelist_params\n params.require(:college_whitelist).permit(:status)\n end", "def active_code_params\n params[:active_code].permit\n end", "def filtering_params\n params.permit(:email)\n end", "def valid_params(params)\n params.permit(:user_id, :photo_id, :originX, :originY, :width, :height)\n end", "def ip_address_params\n\t\t\tparams.require(:ip_address).permit!\n end", "def pull_request_params\n whitelist = [\n :url,\n :id,\n :html_url,\n :diff_url,\n :patch_url,\n :issue_url,\n :number,\n :state,\n :locked,\n :title\n ]\n params.require(:pull_request).permit(whitelist)\n end", "def reserved_params\n params.require(:reserved).permit(:name, :email, :pax, :address, :KTP, :title)\n end", "def post_params\n if current_user.admin? 
\n params.permit(:title, :body, :city, :country, :gps_location, :privacy, :visible, :latitude, :longitude, images: [], files: [])\n else \n params.permit(:title, :body, :city, :country, :gps_location, :privacy,:latitude, :longitude, images: [], files: [])\n end \n end", "def list_params\n params.permit(:name)\n end", "def filter_parameters; end", "def filter_parameters; end", "def vineyard_params\n params.permit(:vineyard_name, :email, :website_url, :phone, :address, :city, :region, :postcode, :country, :specialty, :description, :pet_friendly, :holiday, :tours, :events, :family_friendly, :cover_image, :image_one, :image_two, :image_three, :image_four, :user_id, :base64)\n end", "def available_activity_params\n # params.require(:available_activity).permit(:type,:geometry,:properties)\n whitelisted = ActionController::Parameters.new({\n type: params.require(:available_activity)[:type],\n geometry: params.require(:available_activity)[:geometry].try(:permit!).to_h,\n properties: params.require(:available_activity)[:properties].try(:permit!).to_h\n }).try(:permit!)\n end", "def user_params\n params.permit(:name, :username, :email, :password, :img_url, :bg_url, :coinbank)\n end", "def user_params_pub\n\t \tparams[:user].permit(:hruid)\n\t end", "def user_params\n params.permit(:id, :email, :password, :nickname, :status, :avatar, :flat_picture, :flatsharing_id, :member,\n :user, :color, :solde)\n end", "def validate_search_inputs\n @whitelisted = params.fetch(:user, nil)\n if @whitelisted.blank?\n render_error(400, \"#{I18n.t('general_error.params_missing_key')}\": [I18n.t('general_error.params_missing_value', model: \"review\")])\n return\n else\n @whitelisted = @whitelisted.permit(:name, :uen, :description)\n end\n end", "def param_whitelist\n [\n :title,\n :description,\n :organization,\n :team_id,\n :started_at,\n :finished_at,\n location: [\n :description,\n :street,\n :city,\n :state,\n :zip,\n :country,\n :latitude,\n :longitude\n ]\n ]\n end", "def url_whitelist; end", "def admin_social_network_params\n params.require(:social_network).permit!\n end", "def filter_params\n params.require(:filters).permit(:letters)\n end", "def origin_params\n params.permit(:country, :state, :city, :postal_code, :address, :description)\n end", "def valid_params(params)\n params.permit(:login, :first_name, :last_name, \n :password, :password_confirmation)\n end", "def sensitive_params=(params)\n @sensitive_params = params\n end", "def permit_request_params\n params.permit(:address)\n end", "def user_params\n # Ensure a user can't give themselves admin priveleges\n params.delete(:admin) if current_user.admin?\n params.require(:user).permit(:name, :email, :admin, :image)\n end", "def secure_params\n params.require(:location).permit(:name)\n end", "def strong_params\n params.require( :setting ).\n permit( :global_scan_limit, :per_user_scan_limit,\n :target_whitelist_patterns, :target_blacklist_patterns )\n end", "def question_params\n params.require(:survey_question).permit(question_whitelist)\n end", "def case_insensitive_params\n params.require(:case_insensitive).permit(:name)\n end", "def empire_master_no_match_params\n params.require(:empire_master_no_match).permit(:uid, :last_name, :list, :search_date, :double, :source)\n end", "def maintenance_request_params\n params[:maintenance_request].permit! 
#allow all parameters for now\n end", "def unwanted_params\n params.require(:unwanted).permit(:title, :description, :image)\n end", "def url_params\n params[:url].permit(:full)\n end", "def backend_user_params\n params.permit!\n end", "def filter_params\n\t\treturn params[:candidate].permit(:name_for_filter)\n\tend", "def speed_measurement_params\n\n #fuckit, to lazy to deal with permit crap right now\n ActionController::Parameters.permit_all_parameters = true\n\n params[:speed_measurement]\n end", "def user_params\n params.permit(:name, :age, :username, :display_photo, :password)\n end", "def get_params\r\n #params.require(:article).permit(:title, :permalink, :content, :source_site, :introtext, :type_id, :order_by, :searchable, :created_by, :edited_by, :published_by, :published_on, :user_id)\r\n params.require(:article).permit!\r\n\r\n end", "def pub_params\n params.require(:pub).permit(:name, :description, :phone, :email, :hidden, :city_id, :address)\n end", "def pass_params\n params[:pass].permit(:name, :price, :description, :colour, :events)\n end", "def droptraining_params\n params.permit(:training_id,:user_id, :utf8, :authenticity_token, :commit)\n end", "def person_params\n # params whitelist does *not* include admin, sub, remember_token\n # TBD: share this whitelist with the list used by configuration_permitted_parameters\n # TBD: should current_password be on this list? -- for now, leaving off, since it seems to work without\n # NOTE: do not include 'admin' in this list!\n params.require(:person).permit(\n :name, \n :email, \n :description,\n :password, \n :password_confirmation\n )\n end", "def parameter_params\n params.require(:parameter).permit(:name, :description, :param_code, :param_value, :active_from, :active_to)\n end" ]
[ "0.69792545", "0.6781151", "0.67419964", "0.674013", "0.6734356", "0.6591046", "0.6502396", "0.6496313", "0.6480641", "0.6477825", "0.64565", "0.6438387", "0.63791263", "0.63740575", "0.6364131", "0.63192815", "0.62991166", "0.62978333", "0.6292148", "0.6290449", "0.6290076", "0.62894756", "0.6283177", "0.6242471", "0.62382483", "0.6217549", "0.6214457", "0.6209053", "0.6193042", "0.6177802", "0.6174604", "0.61714715", "0.6161512", "0.6151757", "0.6150663", "0.61461", "0.61213595", "0.611406", "0.6106206", "0.6105114", "0.6089039", "0.6081015", "0.6071004", "0.60620916", "0.6019971", "0.601788", "0.6011056", "0.6010898", "0.6005122", "0.6005122", "0.6001556", "0.6001049", "0.59943926", "0.5992201", "0.59909594", "0.5990628", "0.5980841", "0.59669393", "0.59589154", "0.5958826", "0.5957911", "0.5957385", "0.5953072", "0.59526145", "0.5943361", "0.59386164", "0.59375334", "0.59375334", "0.5933856", "0.59292704", "0.59254247", "0.5924164", "0.59167904", "0.59088355", "0.5907542", "0.59064597", "0.5906243", "0.5898226", "0.589687", "0.5896091", "0.5894501", "0.5894289", "0.5891739", "0.58860534", "0.5882406", "0.587974", "0.58738774", "0.5869024", "0.58679986", "0.5867561", "0.5865932", "0.5864461", "0.58639693", "0.58617616", "0.5861436", "0.5860451", "0.58602303", "0.5854586", "0.58537364", "0.5850427", "0.5850199" ]
0.0
-1
add ras yardstick tasks to namespace :docs
def initialize
  namespace :docs do
    config = YAML.load_file(".yardstick.yml")

    desc "Measure YARD coverage. see yardstick/report.txt for output"
    require "yardstick/rake/measurement"
    Yardstick::Rake::Measurement.new(:measure_ras, config) do |measurement|
      measurement.output = "yardstick/ras_report.txt"
    end
    task measure_ras: [:measure_ras_message] # another way to force a dependent task

    desc "" # empty description so this doesn't show up in rake -T
    task :measure_ras_message do
      puts "creating a report in yardstick/ras_report.txt"
    end

    desc "Verify YARD coverage"
    require "yardstick/rake/verify"
    Yardstick::Rake::Verify.new(:verify_ras, config)
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doc_task; end", "def define_seattlerb_tasks\n if Hoe.plugins.include? :publish then\n base = \"/data/www/docs.seattlerb.org\"\n rdoc_locations << \"docs-push.seattlerb.org:#{base}/#{remote_rdoc_dir}\"\n end\n end", "def doc_task=(_arg0); end", "def define_asciidoc_tasks\n if defined?(AsciiDocTasks) && File.exist?(\"#{doc_dir}/asciidoc.conf\") && asciidoc_available?\n man_pages = FileList[\"#{doc_dir}/*.[0-9].txt\"]\n articles = FileList[\"#{doc_dir}/*.txt\"] - man_pages\n desc \"Build AsciiDoc under #{doc_dir}\"\n AsciiDocTasks.new('doc:asciidoc') do |t|\n t.source_dir = doc_dir\n t.source_files = articles\n t.doc_type = :article\n t.config_file = \"#{doc_dir}/asciidoc.conf\"\n t.attributes = asciidoc_attributes\n end\n AsciiDocTasks.new('doc:asciidoc') do |t|\n t.source_dir = doc_dir\n t.source_files = man_pages\n t.doc_type = :manpage\n t.config_file = \"#{doc_dir}/asciidoc.conf\"\n t.attributes = asciidoc_attributes\n end\n else\n desc \"Build AsciiDoc (disabled)\"\n task 'asciidoc'\n task 'asciidoc:build'\n task 'asciidoc:clean'\n task 'asciidoc:rebuild'\n end\n task 'doc:build' => 'doc:asciidoc:build'\n task 'doc:clean' => 'doc:asciidoc:clean'\n task 'doc:rebuild' => 'doc:asciidoc:rebuild'\n end", "def rdoc_task_description\n 'Build RDoc HTML files'\n end", "def define_publish_tasks\n Rake::RDocTask.new(:docs) do |rd|\n rd.main = readme_file\n rd.options << '-d' if (`which dot` =~ /\\/dot/) unless\n ENV['NODOT'] || Hoe::WINDOZE\n rd.rdoc_dir = 'doc'\n\n rd.rdoc_files += spec.require_paths\n rd.rdoc_files += spec.extra_rdoc_files\n\n title = \"#{name}-#{version} Documentation\"\n title = \"#{rubyforge_name}'s \" + title if rubyforge_name != name\n\n rd.options << \"-t\" << title\n end\n\n desc 'Generate ri locally for testing.'\n task :ridocs => :clean do\n sh %q{ rdoc --ri -o ri . 
}\n end\n\n desc 'Publish RDoc to RubyForge.'\n task :publish_docs => [:clean, :docs] do\n config = YAML.load(File.read(File.expand_path(\"~/.rubyforge/user-config.yml\")))\n host = \"#{config[\"username\"]}@rubyforge.org\"\n\n remote_dir = \"/var/www/gforge-projects/#{rubyforge_name}/#{remote_rdoc_dir}\"\n local_dir = 'doc'\n\n sh %{rsync #{rsync_args} #{local_dir}/ #{host}:#{remote_dir}}\n end\n\n # no doco for this one\n task :publish_on_announce do\n with_config do |config, _|\n Rake::Task['publish_docs'].invoke if config[\"publish_on_announce\"]\n end\n end\n\n desc 'Generate email announcement file.'\n task :email do\n require 'rubyforge'\n subject, title, body, urls = announcement\n\n File.open(\"email.txt\", \"w\") do |mail|\n mail.puts \"Subject: [ANN] #{subject}\"\n mail.puts\n mail.puts title\n mail.puts\n mail.puts urls\n mail.puts\n mail.puts body\n mail.puts\n mail.puts urls\n end\n puts \"Created email.txt\"\n end\n\n desc 'Post announcement to blog.'\n task :post_blog do\n require 'xmlrpc/client'\n\n with_config do |config, path|\n break unless config['blogs']\n\n subject, title, body, urls = announcement\n body += \"\\n\\n#{urls}\"\n\n config['blogs'].each do |site|\n server = XMLRPC::Client.new2(site['url'])\n content = site['extra_headers'].merge(:title => title,\n :description => body,\n :categories => blog_categories)\n\n result = server.call('metaWeblog.newPost',\n site['blog_id'],\n site['user'],\n site['password'],\n content,\n true)\n end\n end\n end\n\n desc 'Post announcement to rubyforge.'\n task :post_news do\n require 'rubyforge'\n subject, title, body, urls = announcement\n\n rf = RubyForge.new.configure\n rf.login\n rf.post_news(rubyforge_name, subject, \"#{title}\\n\\n#{body}\")\n puts \"Posted to rubyforge\"\n end\n\n desc 'Create news email file and post to rubyforge.'\n task :announce => [:email, :post_news, :post_blog, :publish_on_announce ]\n end", "def configure_rdoc_task(task)\n task.rdoc_files += @ruby_sources.reject { |file| file =~ /^test|Rakefile/ } + [ \"LICENSE\", \"README.rdoc\" ]\n task.main = \"README.rdoc\"\n task.rdoc_dir = \"rdoc\"\n task.options = @spec.rdoc_options\n end", "def define_publish_tasks\n if need_rdoc then\n task :isolate # ensure it exists\n\n desc \"Generate rdoc\"\n task :docs => [:clobber_docs, :isolate] do\n sh(*make_rdoc_cmd)\n end\n\n desc \"Generate rdoc coverage report\"\n task :dcov => :isolate do\n sh(*make_rdoc_cmd(\"-C\"))\n end\n\n desc \"Remove RDoc files\"\n task :clobber_docs do\n rm_rf local_rdoc_dir\n end\n\n task :clobber => :clobber_docs\n\n desc \"Generate ri locally for testing.\"\n task :ridocs => [:clean, :isolate] do\n sh(*make_rdoc_cmd(\"--ri\", \"-o\", \"ri\"))\n end\n end\n\n desc \"Publish RDoc to wherever you want.\"\n task :publish_docs => [:clean, :docs] do\n publish_docs_task\n end\n\n # no doco for this one\n task :publish_on_announce do\n publish_on_announce_task\n end\n\n desc \"Generate email announcement file.\"\n task :debug_email do\n puts generate_email ENV[\"FULL\"]\n end\n\n desc 'Post announcement to blog. 
Uses the \"blogs\" array in your hoerc.'\n task :post_blog do\n post_blog_task\n end\n\n desc \"Announce your release.\"\n task :announce => [:post_blog, :publish_on_announce ]\n end", "def rerdoc_task_description\n \"Rebuild RDoc HTML files\"\n end", "def rebuild_docs():\n\tprint 'rebuilding docs'\n\tfrom fabric.api import local, cd\n\tlocal('fab docs')\n\t\t\nTask.add('/vagrant', rebuild_docs)", "def define\n desc rdoc_task_description\n task rdoc_task_name\n\n desc rerdoc_task_description\n task rerdoc_task_name => [clobber_task_name, rdoc_task_name]\n\n desc clobber_task_description\n task clobber_task_name do\n rm_r @rdoc_dir rescue nil\n end\n\n task :clobber => [clobber_task_name]\n\n directory @rdoc_dir\n\n rdoc_target_deps = [\n @rdoc_files,\n Rake.application.rakefile\n ].flatten.compact\n\n task rdoc_task_name => [rdoc_target]\n file rdoc_target => rdoc_target_deps do\n @before_running_rdoc.call if @before_running_rdoc\n args = option_list + @rdoc_files\n\n $stderr.puts \"rdoc #{args.join ' '}\" if Rake.application.options.trace\n RDoc::RDoc.new.document args\n end\n\n self\n end", "def define(&block)\n namespace :dm do\n namespace(:doc,&block)\n end\n\n task 'db:doc' => 'dm:doc'\n end", "def add_tool(file, clazz)\n name = File.basename(file).gsub(/.rb$/, '').gsub(/_task/, '')\n doc_file = File.expand_path(File.dirname(file) + \"/#{name}_documentation.rb\")\n content = clazz.new(name, Rake::application).to_rdoc\n\n File.open(doc_file, 'w') do |f|\n f.write content\n end\n\n CLEAN.add(doc_file)\n\n task :package => doc_file\n task :rdoc => doc_file\nend", "def run_sphinx task\n rsudo \"cd #{current_path} && RAILS_ENV=#{Rubber.env} #{fetch(:rake, 'rake')} #{task}\", :as => runner\n end", "def with_yardoc_task(&block)\n @with_yardoc_task = block\n nil\n end", "def define_tasks\n # Task definitions are taken from Rake::TestTask\n # https://github.com/ruby/rake/blob/e644af3/lib/rake/testtask.rb#L98-L112\n namespace :ttnt do\n namespace name do\n define_run_task\n define_anchor_task\n end\n end\n end", "def clobber_task_description\n \"Remove RDoc HTML files\"\n end", "def docs; end", "def define\n desc \"Generate rokko documentation\"\n task @name do\n # Find README file for `index.html` and delete it from `sources`\n if @options[:generate_index]\n readme_source = @sources.detect { |f| File.basename(f) =~ /README(\\.(md|text|markdown|mdown|mkd|mkdn)$)?/i }\n readme = readme_source ? 
File.read(@sources.delete(readme_source)) : ''\n end\n\n # Run each file through Rokko and write output\n @sources.each do |filename|\n rokko = Rokko.new(filename, @sources, @options)\n out_dest = File.join(@dest, filename.sub(Regexp.new(\"#{File.extname(filename)}$\"), \".html\"))\n puts \"rokko: #{filename} -> #{out_dest}\"\n FileUtils.mkdir_p File.dirname(out_dest)\n File.open(out_dest, 'wb') { |fd| fd.write(rokko.to_html) }\n end\n\n # Generate index.html if needed\n if @options[:generate_index]\n require 'rokko/index_layout'\n out_dest = File.join(@dest, 'index.html')\n puts \"rokko: #{out_dest}\"\n File.open(out_dest, 'wb') { |fd| fd.write(IndexLayout.new(@sources, readme, @options).render) }\n end\n\n # Run specified file through rokko and use it as index\n if @options[:index] && source_index = @sources.find{|s| s == @options[:index]}\n rokko = Rokko.new(source_index, @sources, @options.merge(preserve_urls: true))\n out_dest = File.join(@dest, 'index.html')\n puts \"rokko: #{source_index} -> index.html\"\n File.open(out_dest, 'wb') { |fd| fd.write(rokko.to_html) }\n end\n\n end\n end", "def task_help(name)\n load_tasks\n\n klass, task = find_by_namespace(name)\n\n # set '$thor_runner' to true to display full namespace\n $thor_runner = true\n\n klass.task_help(shell , task)\n end", "def link_documentation; end", "def connect_common_tasks\n desc \"Build the code narrative HTML\"\n ::Rake::Task.define_task(:codnar => \"codnar_weave\")\n desc \"Remove woven HTML documentation\"\n ::Rake::Task.define_task(\"clobber_codnar\") { rm_rf(@output) }\n ::Rake::Task.define_task(:clobber => \"clobber_codnar\")\n end", "def docs = require_relative 'scaffold/docs'", "def what_it_does() \"Generate javadoc to '#{@name}' folder\" end", "def description\n \"This task runs a web scan and adds webpages with interesting contents\"\nend", "def configure_tasks\n end", "def copy_api_docs_starterkit\n log :copy_api_docs_starterkit, \"\"\n\n mkdir_p \"doc\"\n\n template \"doc/api_doc.mkd.erb\",\n 'doc/api.markdown',\n force: true\n end", "def yardoc_file; end", "def exec_doc\n output = File.join('doc', 'rdoc')\n title = (PACKAGE.capitalize + \" API\").strip\n main = Dir.glob(\"README{,.txt}\", File::FNM_CASEFOLD).first\n template = config.doctemplate || 'html'\n\n opt = []\n opt << \"-U\"\n opt << \"-S\"\n opt << \"--op=#{output}\"\n opt << \"--template=#{template}\"\n opt << \"--title=#{title}\"\n opt << \"--main=#{main}\" if main\n\n if File.exist?('.document')\n files = File.read('.document').split(\"\\n\")\n files.reject!{ |l| l =~ /^\\s*[#]/ || l !~ /\\S/ }\n files.collect!{ |f| f.strip }\n opt << files\n else\n opt << main if main\n opt << [\"lib\", \"ext\"]\n end\n\n opt = opt.flatten\n\n if no_harm?\n puts \"rdoc \" + opt.join(' ').strip\n else\n #sh \"rdoc {opt.join(' ').strip}\"\n require 'rdoc/rdoc'\n ::RDoc::RDoc.new.document(opt)\n end\n end", "def enable_db_doc(target_directory)\n task \"#{task_prefix}:db_doc\"\n task \"#{task_prefix}:pre_build\" => [\"#{task_prefix}:db_doc\"]\n\n (up_dirs + down_dirs).each do |relative_dir_name|\n dirs_for_database(relative_dir_name).each do |dir|\n task \"#{task_prefix}:db_doc\" => Dbt::DbDoc.define_doc_tasks(dir, \"#{target_directory}/#{relative_dir_name}\")\n end\n end\n end", "def define_tasks!\n\n define_test_tasks! if has_tests?\n define_rspec_tasks! 
if has_specs?\n\n namespace(@task_namespace) do\n desc \"Updates the filelist in the gemspec file\"\n task(:manifest) { manifest_task }\n\n desc \"Builds the .gem package\"\n task(:build => :manifest) { build_task }\n\n desc \"Sets the version of the gem in the gemspec\"\n task(:set_version => [:check_version, :check_current_branch]) { version_task }\n task(:check_version => :fetch_origin) { check_version_task }\n\n task(:fetch_origin) { fetch_origin_task }\n task(:check_current_branch) { check_current_branch_task }\n task(:check_clean_status) { check_clean_status_task }\n task(:check_not_diverged => :fetch_origin) { check_not_diverged_task }\n\n checks = [:check_current_branch, :check_clean_status, :check_not_diverged, :check_version]\n checks.unshift('spec:basic') if has_specs?\n checks.unshift('test:basic') if has_tests?\n # checks.push << [:check_rubyforge] if gemspec.rubyforge_project\n\n desc \"Perform all checks that would occur before a release\"\n task(:release_checks => checks)\n\n release_tasks = [:release_checks, :set_version, :build, :github_release, :gemcutter_release]\n # release_tasks << [:rubyforge_release] if gemspec.rubyforge_project\n\n desc \"Release a new version of the gem using the VERSION environment variable\"\n task(:release => release_tasks) { release_task }\n \n namespace(:release) do\n desc \"Release the next version of the gem, by incrementing the last version segment by 1\"\n task(:next => [:next_version] + release_tasks) { release_task }\n\n desc \"Release the next version of the gem, using a patch increment (0.0.1)\"\n task(:patch => [:next_patch_version] + release_tasks) { release_task }\n\n desc \"Release the next version of the gem, using a minor increment (0.1.0)\"\n task(:minor => [:next_minor_version] + release_tasks) { release_task }\n\n desc \"Release the next version of the gem, using a major increment (1.0.0)\"\n task(:major => [:next_major_version] + release_tasks) { release_task }\n end\n\n # task(:check_rubyforge) { check_rubyforge_task }\n # task(:rubyforge_release) { rubyforge_release_task }\n task(:gemcutter_release) { gemcutter_release_task }\n task(:github_release => [:commit_modified_files, :tag_version]) { github_release_task }\n task(:tag_version) { tag_version_task }\n task(:commit_modified_files) { commit_modified_files_task }\n\n task(:next_version) { next_version_task }\n task(:next_patch_version) { next_version_task(:patch) }\n task(:next_minor_version) { next_version_task(:minor) }\n task(:next_major_version) { next_version_task(:major) }\n \n desc \"Updates the gem release tasks with the latest version on Github\"\n task(:update_tasks) { update_tasks_task }\n end\n end", "def define_tasks\n private_methods.grep(/^define_(\\w+)_tasks$/).each do |meth| \n namespace_name = meth.match(/^define_(\\w+)_tasks$/)[1]\n send(meth)\n end\n end", "def do_docs_debug( task, args )\n\t\tself.prompt.say( \"Docs are published to:\", color: :bright_green )\n\t\tif ( publish_url = self.publish_to )\n\t\t\tself.prompt.say( self.indent(publish_url, 4) )\n\t\telse\n\t\t\tself.prompt.say( self.indent(\"n/a\"), color: :bright_yellow )\n\t\tend\n\t\tself.prompt.say( \"\\n\" )\n\tend", "def description\n \"This task hits the Corpwatch API and creates an object for all found entities.\"\nend", "def create_docs\n directory 'templates/docs', 'docs'\nend", "def description\n \"This task grabs the robots.txt and adds each line as a web page\"\nend", "def enable_db_doc(target_directory)\n task \"#{task_prefix}:db_doc\"\n task \"#{task_prefix}:pre_build\" => 
[\"#{task_prefix}:db_doc\"]\n\n (up_dirs + down_dirs).each do |relative_dir_name|\n dirs_for_database(relative_dir_name).each do |dir|\n task \"#{task_prefix}:db_doc\" => DbTasks::DbDoc.define_doc_tasks(dir, \"#{target_directory}/#{relative_dir_name}\")\n end\n end\n end", "def description\n \"This task hits the Corpwatch API and creates an entity for all found entities.\"\nend", "def documentation_url; end", "def description\n \"This task hits the corpwatch API and adds detail for the organization.\"\nend", "def description\n\t\t\"Msf Wrapper Task\"\n\tend", "def base_docstring; end", "def parse_task(tk)\n collect_tokens\n add_token tk\n\n token_listener self do\n skip_tkspace false\n\n tk = get_tk\n name = tk.text\n\n @task = @container.find_instance_method_named name\n\n unless @task then\n @task = RDoc::RakeTask.new tokens_to_s, name\n @container.add_method @task\n @stats.add_method @task\n end\n\n @task.comment += use_desc\n\n consume_task_arguments\n end\n\n @task.collect_tokens\n @task.add_tokens token_stream\n\n token_listener @task do\n consume_body\n end\n\n @task\n end", "def define\r\n\t\t\ttask :foo do\r\n\t\t\t\tputs 'foo!'\r\n\t\t\tend\r\n\t\tend", "def docstring; end", "def docstring; end", "def setup\n yard_setup\n YARD::Templates::Engine.template_paths +=\n [File.dirname(__FILE__) + '/../../templates',File.dirname(__FILE__) + '/../../docserver']\n end", "def description\n \"This task simply attaches a note entity to the current entity.\"\nend", "def define_tasks\r\n define_repeat_task\r\n define_clobber_task\r\n define_build_task\r\n end", "def declare_rake_tasks\n @flavor.class.do_declare_resources do\n if snippet?('cookbook_base')\n rake_tasks['foodcritic'] = <<'END'\nrequire 'foodcritic'\nrequire 'foodcritic/rake_task'\n\nFoodCritic::Rake::LintTask.new(:foodcritic)\ntask style: :foodcritic\nEND\n end\n end\n end", "def rake_tasks(&block); end", "def rake_tasks(&block); end", "def docs=(_arg0); end", "def define_tasks\r\n define_clobber_task\r\n define_build_task\r\n end", "def description\n \"This task hits the Google API and finds related content. Discovered domains are created.\"\nend", "def description\n \"This task hits the Google API and finds related content. 
Discovered domains are created.\"\nend", "def define\n\t\ttask :foo do\n\t\t\tputs 'foo!'\n\t\tend\n\tend", "def todo\n @tasks = TaskDecorator.decorate_collection Task.getReadyToDoTasks\n render \"v1/tasks/index\"\n end", "def load_tasks\n end", "def javadoc_artifact\n javadoc_spec = to_spec_hash.merge(:classifier=>'javadoc')\n javadoc_task = OptionalArtifact.define_task(Buildr.repositories.locate(javadoc_spec))\n javadoc_task.send :apply_spec, javadoc_spec\n javadoc_task\n end", "def define\n super\n namespace :yolo do\n namespace :ocunit do\n desc \"Runs the specified scheme(s) OCUnit tests.\"\n task :test do\n xcodebuild :clean\n xcodebuild :build\n end\n end\n end\n end", "def define\n logger.debug \"Defining tasks for #{name} #{version}\"\n\n namespace \"#{name}\" do\n define_download\n define_verify\n define_unpack\n define_patch\n define_build\n define_install\n\n task :done => \"#{name}:install\"\n task :default => \"#{name}:done\"\n end\n\n desc \"Build and Install #{name} #{version}\"\n task name => \"#{name}:default\"\n end", "def generate_doc(resource_docs)\n generate_index_templates(resource_docs)\n copy_assets!\n end", "def index\n @yas_tasks = YasTask.all\n end", "def generate_documentation_html\n YARD::Registry.yardoc_file = Dir.mktmpdir('auto_yardoc')\n begin\n YARD::Tags::Library.define_tag('', :request_param, :with_types_and_name)\n YARD::Tags::Library.define_tag('', :request_body, :with_types_and_name)\n YARD::Registry.load([settings.app_file], true)\n template_path = File.join(File.dirname(__FILE__), '../../templates_custom')\n YARD::Templates::Engine.register_template_path(template_path)\n YARD::Templates::Engine.render(:object => YARD::Registry.resolve(nil, self.class.to_s),\n :format => :html)\n ensure\n YARD::Registry.delete_from_disk\n end\n end", "def define\n desc default_description unless ::Rake.application.last_description\n\n task(name, [:files]) do |_task, task_args|\n run_cli(task_args)\n end\n end", "def docs\n options.verbose? ? 
@@log.level = Logger::DEBUG : @@log.level = Logger::ERROR\n repo_parent_dir = File.expand_path(\"#{File.dirname(__FILE__)}/../../\") \n remove_dir \"#{repo_parent_dir}/origin-docs\"\n empty_directory \"#{repo_parent_dir}/origin-docs\"\n \n unless File.exist?(\"/tmp/yard-js\")\n run \"git clone git://github.com/lsegal/yard-js /tmp/yard-js\"\n inside(\"/tmp/yard-js\") { run \"bundle install\" }\n end\n \n inside(repo_parent_dir) do\n doc_files = Dir[\"#{repo_parent_dir}/origin-server/documentation/*.md\"].join(\",\")\n inside(\"origin-server/documentation\") { run \"yardoc --markup=markdown --output-dir '#{repo_parent_dir}/origin-docs' --files #{doc_files}\" }\n run %{yardoc --output-dir '#{repo_parent_dir}/origin-docs/broker' --main origin-server/documentation/broker.md --private --protected --exclude test \\\n #{Dir[\"origin-server/broker/**/*.rb\"].join(' ')} \\\n #{Dir[\"origin-server/controller/**/*.rb\"].join(' ')}}\n run %{yardoc --output-dir '#{repo_parent_dir}/origin-docs/rest_api' --main origin-server/documentation/rest_api.md --api REST \\\n #{Dir[\"origin-server/controller/**/*.rb\"].join(' ')}}\n run \"yardoc --output-dir '#{repo_parent_dir}/origin-docs/broker_models' --main origin-server/documentation/broker_models.md --private --protected --api model #{Dir[\"origin-server/controller/**/*.rb\"].join(' ')}\"\n run \"yardoc --output-dir '#{repo_parent_dir}/origin-docs/node' --main origin-server/documentation/node.md --private --protected --exclude test #{Dir[\"origin-server/node/**/*.rb\"].join(' ')}\"\n run \"yardoc --output-dir '#{repo_parent_dir}/origin-docs/common' --main origin-server/documentation/common.md --private --protected --exclude test #{Dir[\"origin-server/common/**/*.rb\"].join(' ')}\"\n run \"yardoc --output-dir '#{repo_parent_dir}/origin-docs/build-tools' --main origin-dev-tools/README.md --private --protected --exclude test #{Dir[\"origin-dev-tools/build/*\"].join(' ')}\"\n end\n end", "def define(args, &task_block)\n desc \"Generate Swagger from AppMaps\" unless ::Rake.application.last_description\n\n task(name, *args) do |_, task_args|\n RakeFileUtils.__send__(:verbose, Rake.verbose == true) do\n task_block.call(*[self, task_args].slice(0, task_block.arity)) if task_block\n Command.new(:git_command).tap do |cmd|\n cmd.base = task_args[:base] || self.base\n cmd.swagger_file = task_args[:swagger_file] || self.swagger_file\n end.perform\n end\n end\n end", "def docs api\n\t\tget_html \"#{@options[:docs]}/#{@apis[api][:url]}.htm\"\n\tend", "def doc; end", "def doc; end", "def doc; end", "def doc; end", "def request_doc\n \n end", "def define_tasks\n define_weave_task\n connect_common_tasks\n end", "def task(name, description=nil, &block)\n puts \"adding task :#{name}\"\n in_root(\"lib/tasks\") do |folder|\n File.open(\"#{folder}/application.rake\", \"a+\") do |f|\n if block_given?\n f.write(code_for(block))\n else\n f.write(data)\n end\n end\n end\n end", "def fulldoc_template; end", "def define_anchor_task\n desc @anchor_description\n task 'anchor' do\n # In order to make it possible to stop coverage services like Coveralls\n # which interferes with ttnt/anchor because both use coverage library.\n # See test/test_helper.rb\n ENV['ANCHOR_TASK'] = '1'\n\n Rake::FileUtilsExt.verbose(verbose) do\n # Make it possible to require files in this gem\n gem_root = File.expand_path('../..', __FILE__)\n args =\n \"-I#{gem_root} -r ttnt/anchor \" +\n \"#{ruby_opts_string}\"\n\n expanded_file_list.each do |test_file|\n run_ruby \"#{args} #{test_file}\"\n end\n end\n\n if 
@code_files\n mapping = TestToCodeMapping.new(repo)\n mapping.select_code_files!(@code_files)\n mapping.write!\n end\n end\n end", "def docstring=(_arg0); end", "def description\n \"This is an example Tapir task. It associates a random host with the calling entity.\"\nend", "def define_default_tasks\n\n\t\t# task used to indicate that rake-deveiate has already been setup once; for\n\t\t# global rakefiles.\n\t\ttask :deveiate do\n\t\t\t# no-op\n\t\tend\n\n\t\tdesc \"The task that runs by default\"\n\t\ttask( :default => :spec )\n\n\t\tdesc \"Check in the current changes\"\n\t\ttask :checkin => [ :precheckin, :check, :test ]\n\t\ttask :commit => :checkin\n\t\ttask :ci => :checkin\n\t\ttask :precheckin\n\n\t\tdesc \"Sanity-check the project\"\n\t\ttask :check\n\n\t\tdesc \"Update the history file\"\n\t\ttask :update_history\n\n\t\tdesc \"Package up and push a release\"\n\t\ttask :release => [ :prerelease, :gem, :release_gem, :postrelease ]\n\t\ttask :prerelease\n\t\ttask :release_gem\n\t\ttask :postrelease\n\n\t\tdesc \"Run all the project's tests\"\n\t\ttask :test\n\t\ttask :spec\n\t\ttask :integration\n\n\t\tdesc \"Set up the project for development\"\n\t\ttask :setup do\n\t\t\tself.install_dependencies\n\t\tend\n\n\t\tdesc \"Turn on maintainer mode: build with extra warnings and debugging\"\n\t\ttask :maint do\n\t\t\tENV['MAINTAINER_MODE'] = 'yes'\n\t\tend\n\n\tend", "def define_codnar_task\n @spec.files.each do |file|\n configurations = Rake.split_configurations(file)\n Codnar::Rake::SplitTask.new([ file ], configurations) unless configurations == []\n end\n Codnar::Rake::WeaveTask.new(\"doc/root.html\", @weave_configurations)\n end", "def tasks(wspace=workspace)\n\t\twspace.tasks\n\tend", "def documentation_path(extra_path=[])\n path = [File.expand_path(File.join(Msf::Config.module_directory, '..', 'documentation', 'modules' )),\n File.expand_path(File.join(Msf::Config.user_module_directory, '..', 'documentation', 'modules' )),\n ]\n #if Msf::Config.method_defined? :staekka_path\n if Msf::Config.methods.include? 
:staekka_path\n path << File.expand_path(File.join(Msf::Config.staekka_path, 'documentation', 'modules' ))\n end\n path.concat(extra_path)\n\n Msf::Config.singleton_class.send(:define_method, :doc_search_path=) do |opt|\n @info_path = opt\n @info_path\n end\n Msf::Config.singleton_class.send(:define_method, :doc_search_path) do\n @info_path\n end\n Msf::Config.doc_search_path=path\n end", "def simple_rake_task(task_name = 'test_task', indent: '', task_body: \"\\n\")\n \"\\n\" + indent +\n \"desc 'task named #{task_name}'\\n\" +\n indent + \"task :#{task_name} do\\n\" +\n indent + \" \" + task_body +\n indent + \"end\\n\\n\"\n\n end", "def parse_namespace\n skip_tkspace\n\n tk = get_tk\n\n namespace = @container.add_module RDoc::RakeNamespace, tk.text[1..-1]\n\n skip_tkspace\n\n old_namespace = @container\n\n begin\n @nest += 1\n @container = namespace\n\n parse_rakefile\n ensure\n @container = old_namespace\n @nest -= 1\n end\n end", "def install_tasks\n load 'falkorlib/tasks/git.rake'\n load 'falkorlib/tasks/gitflow.rake'\n end", "def javadoc(*args)\n Buildr.application.deprecated 'Use Java::Commands.javadoc instead.'\n Commands.javadoc(*args)\n end", "def simpleTasks _args\n \"simpleTasks _args;\" \n end", "def list_tasks\n # ASK REPO for the tasks\n tasks = @task_repository.all\n # ASK VIEW to display them\n @tasks_view.display(tasks)\n end", "def tasks\n ProjectConfiguration.templates[template]::TASKS\n end", "def tasks\n ProjectConfiguration.templates[template]::TASKS\n end", "def registeredTasks _args\n \"registeredTasks _args;\" \n end", "def load_special_tasks\n $bot[:tasks][:list] = {\n block: -> do\n list_tasks\n end,\n desc: 'List all available tasks'\n }\n end", "def documentation\n\troot = settings.root + '/doc'\n\tcontent = File.open(\"#{root}/#{@user.default_locale}.textile\", 'r').read()\n\tRedCloth.new(content).to_html\nend", "def cron_tasks\n end", "def rake project_symbol, options = {}, &gem_config\n program_file = first_caller_file\n program_home = File.dirname(program_file)\n\n # load the project module\n program_name = File.basename(program_home)\n\n require File.join('lib', program_name)\n project_module = fetch_project_module(project_symbol)\n\n # supply default options\n options[:rubyforge_project] ||= program_name\n options[:rubyforge_section] ||= program_name\n options[:raa_project] ||= program_name\n options[:license_file] ||= 'LICENSE'\n options[:logins_file] ||= File.join(ENV['HOME'], '.config', 'inochi', 'logins.yaml')\n options[:upload_delete] ||= false\n options[:upload_options] ||= []\n \n authors = project_module.const_get(:AUTHORS) rescue nil\n unless authors\n # add AUTHORS constant to the project module\n license = File.read(options[:license_file])\n\n copyright_holders =\n license.scan(/Copyright.*?\\d+\\s+(.*)/).flatten.\n map {|s| (s =~ /\\s*<(.*?)>/) ? 
[$`, $1] : [s, ''] }\n\n project_module.const_set :AUTHORS, copyright_holders\n end\n\n require 'rake/clean'\n\n hide_rake_task = lambda do |name|\n Rake::Task[name].instance_variable_set :@comment, nil\n end\n\n # documentation\n desc 'Build all documentation.'\n task :doc => %w[ doc:api doc:man ]\n\n # user manual\n doc_man_src = 'doc/index.erb'\n doc_man_dst = 'doc/index.xhtml'\n doc_man_deps = FileList['doc/*.erb']\n\n doc_man_doc = nil\n task :doc_man_doc => doc_man_src do\n unless doc_man_doc\n unless project_symbol == :ERBook\n gem 'erbook', '~> 6'\n require 'erbook'\n end\n\n doc_man_txt = File.read(doc_man_src)\n doc_man_doc = ERBook::Document.new(:xhtml, doc_man_txt, doc_man_src, :unindent => true)\n end\n end\n\n desc 'Build the user manual.'\n task 'doc:man' => doc_man_dst\n\n file doc_man_dst => doc_man_deps do\n Rake::Task[:doc_man_doc].invoke\n File.write doc_man_dst, doc_man_doc\n end\n\n CLOBBER.include doc_man_dst\n\n # API reference\n doc_api_dst = 'doc/api'\n\n desc 'Build API reference.'\n task 'doc:api' => doc_api_dst\n\n require 'yard'\n YARD::Rake::YardocTask.new doc_api_dst do |t|\n t.options.push '--protected',\n '--output-dir', doc_api_dst,\n '--readme', options[:license_file]\n\n task doc_api_dst => options[:license_file]\n end\n\n hide_rake_task[doc_api_dst]\n\n CLEAN.include '.yardoc'\n CLOBBER.include doc_api_dst\n\n # announcements\n desc 'Build all release announcements.'\n task :ann => %w[ ann:feed ann:html ann:text ann:mail ]\n\n # it has long been a tradition to use an \"[ANN]\" prefix\n # when announcing things on the ruby-talk mailing list\n ann_prefix = '[ANN] '\n ann_subject = ann_prefix + project_module::DISPLAY\n ann_project = ann_prefix + project_module::PROJECT\n\n # fetch the project summary from user manual\n ann_nfo_doc = nil\n task :ann_nfo_doc => :doc_man_doc do\n ann_nfo_doc = $project_summary_node\n end\n\n # fetch release notes from user manual\n ann_rel_doc = nil\n task :ann_rel_doc => :doc_man_doc do\n unless ann_rel_doc\n if parent = $project_history_node\n if child = parent.children.first\n ann_rel_doc = child\n else\n raise 'The \"project_history\" node in the user manual lacks child nodes.'\n end\n else\n raise 'The user manual lacks a \"project_history\" node.'\n end\n end\n end\n\n # build release notes in HTML and plain text\n # converts the given HTML into plain text. we do this using\n # lynx because (1) it outputs a list of all hyperlinks used\n # in the HTML document and (2) it runs on all major platforms\n convert_html_to_text = lambda do |html|\n require 'tempfile'\n\n begin\n # lynx's -dump option requires a .html file\n tmp_file = Tempfile.new(Inochi::PROGRAM).path + '.html'\n\n File.write tmp_file, html\n text = `lynx -dump #{tmp_file} -width 70`\n ensure\n File.delete tmp_file\n end\n\n # improve readability of list items that span multiple\n # lines by adding a blank line between such items\n text.gsub! %r{^( *[^\\*\\s].*)(\\r?\\n)( *\\* \\S)}, '\\1\\2\\2\\3'\n\n text\n end\n\n # binds relative addresses in the given HTML to the project docsite\n resolve_html_links = lambda do |html|\n # resolve relative URLs into absolute URLs\n # see http://en.wikipedia.org/wiki/URI_scheme#Generic_syntax\n require 'addressable/uri'\n uri = Addressable::URI.parse(project_module::DOCSITE)\n doc_url = uri.to_s\n dir_url = uri.path =~ %r{/$|^$} ? 
doc_url : File.dirname(doc_url)\n\n html.to_s.gsub %r{(href=|src=)(.)(.*?)(\\2)} do |match|\n a, b = $1 + $2, $3.to_s << $4\n\n case $3\n when %r{^[[:alpha:]][[:alnum:]\\+\\.\\-]*://} # already absolute\n match\n\n when /^#/\n a << File.join(doc_url, b)\n\n else\n a << File.join(dir_url, b)\n end\n end\n end\n\n ann_html = nil\n task :ann_html => [:doc_man_doc, :ann_nfo_doc, :ann_rel_doc] do\n unless ann_html\n ann_html = %{\n <center>\n <h1>#{project_module::DISPLAY}</h1>\n <p>#{project_module::TAGLINE}</p>\n <p>#{project_module::WEBSITE}</p>\n </center>\n #{ann_nfo_doc}\n #{ann_rel_doc}\n }\n\n # remove heading navigation menus\n ann_html.gsub! %r{<div class=\"nav\"[^>]*>(.*?)</div>}, ''\n\n ann_html = resolve_html_links[ann_html]\n end\n end\n\n ann_text = nil\n task :ann_text => :ann_html do\n unless ann_text\n ann_text = convert_html_to_text[ann_html]\n end\n end\n\n ann_nfo_text = nil\n task :ann_nfo_text => :ann_nfo_doc do\n unless ann_nfo_text\n ann_nfo_html = resolve_html_links[ann_nfo_doc]\n ann_nfo_text = convert_html_to_text[ann_nfo_html]\n end\n end\n\n # HTML\n ann_html_dst = 'ANN.html'\n\n desc \"Build HTML announcement: #{ann_html_dst}\"\n task 'ann:html' => ann_html_dst\n\n file ann_html_dst => doc_man_deps do\n Rake::Task[:ann_html].invoke\n File.write ann_html_dst, ann_html\n end\n\n CLEAN.include ann_html_dst\n\n # RSS feed\n ann_feed_dst = 'doc/ann.xml'\n\n desc \"Build RSS announcement: #{ann_feed_dst}\"\n task 'ann:feed' => ann_feed_dst\n\n file ann_feed_dst => doc_man_deps do\n require 'time'\n require 'rss/maker'\n\n feed = RSS::Maker.make('2.0') do |feed|\n feed.channel.title = ann_project\n feed.channel.link = project_module::WEBSITE\n feed.channel.description = project_module::TAGLINE\n\n Rake::Task[:ann_rel_doc].invoke\n Rake::Task[:ann_html].invoke\n\n item = feed.items.new_item\n item.title = ann_rel_doc.title\n item.link = project_module::DOCSITE + '#' + ann_rel_doc.here_frag\n item.date = Time.parse(item.title)\n item.description = ann_html\n end\n\n File.write ann_feed_dst, feed\n end\n\n CLOBBER.include ann_feed_dst\n\n # plain text\n ann_text_dst = 'ANN.txt'\n\n desc \"Build plain text announcement: #{ann_text_dst}\"\n task 'ann:text' => ann_text_dst\n\n file ann_text_dst => doc_man_deps do\n Rake::Task[:ann_text].invoke\n File.write ann_text_dst, ann_text\n end\n\n CLEAN.include ann_text_dst\n\n # e-mail\n ann_mail_dst = 'ANN.eml'\n\n desc \"Build e-mail announcement: #{ann_mail_dst}\"\n task 'ann:mail' => ann_mail_dst\n\n file ann_mail_dst => doc_man_deps do\n File.open ann_mail_dst, 'w' do |f|\n require 'time'\n f.puts \"Date: #{Time.now.rfc822}\"\n\n f.puts 'To: [email protected]'\n f.puts 'From: \"%s\" <%s>' % project_module::AUTHORS.first\n f.puts \"Subject: #{ann_subject}\"\n\n Rake::Task[:ann_text].invoke\n f.puts '', ann_text\n end\n end\n\n CLEAN.include ann_mail_dst\n\n # packaging\n desc 'Build a release.'\n task :pak => [:clobber, :doc] do\n sh $0, 'package'\n end\n CLEAN.include 'pkg'\n\n # ruby gem\n require 'rake/gempackagetask'\n\n gem = Gem::Specification.new do |gem|\n authors = project_module::AUTHORS\n\n if author = authors.first\n gem.author, gem.email = author\n end\n\n if authors.length > 1\n gem.authors = authors.map {|name, mail| name }\n end\n\n gem.rubyforge_project = options[:rubyforge_project]\n\n # XXX: In theory, `gem.name` should be assigned to\n # ::PROJECT instead of ::PROGRAM\n #\n # In practice, PROJECT may contain non-word\n # characters and may also contain a mixture\n # of lowercase and uppercase letters.\n #\n # 
This makes it difficult for people to\n # install the project gem because they must\n # remember the exact spelling used in\n # `gem.name` when running `gem install ____`.\n #\n # For example, consider the \"RedCloth\" gem.\n #\n gem.name = project_module::PROGRAM\n\n gem.version = project_module::VERSION\n gem.summary = project_module::TAGLINE\n gem.description = gem.summary\n gem.homepage = project_module::WEBSITE\n gem.files = FileList['**/*'].exclude('_darcs') - CLEAN\n gem.executables = project_module::PROGRAM\n gem.has_rdoc = true\n\n unless project_module == Inochi\n gem.add_dependency 'inochi', Inochi::VERSION.requirement\n end\n\n project_module::REQUIRE.each_pair do |gem_name, version_reqs|\n gem.add_dependency gem_name, *version_reqs\n end\n\n # additional configuration is done by user\n yield gem if gem_config\n end\n\n Rake::GemPackageTask.new(gem).define\n\n # XXX: hide the tasks defined by the above gem packaging library\n %w[gem package repackage clobber_package].each {|t| hide_rake_task[t] }\n\n # releasing\n desc 'Publish a release.'\n task 'pub' => %w[ pub:pak pub:doc pub:ann ]\n\n # connect to RubyForge services\n pub_forge = nil\n pub_forge_project = options[:rubyforge_project]\n pub_forge_section = options[:rubyforge_section]\n\n task :pub_forge do\n require 'rubyforge'\n pub_forge = RubyForge.new\n pub_forge.configure('release_date' => project_module::RELEASE)\n\n unless pub_forge.autoconfig['group_ids'].key? pub_forge_project\n raise \"The #{pub_forge_project.inspect} project was not recognized by the RubyForge client. Either specify a different RubyForge project by passing the :rubyforge_project option to Inochi.rake(), or ensure that the client is configured correctly (see \`rubyforge --help\` for help) and try again.\"\n end\n\n pub_forge.login\n end\n\n # documentation\n desc 'Publish documentation to project website.'\n task 'pub:doc' => [:doc, 'ann:feed'] do\n target = options[:upload_target]\n\n unless target\n require 'addressable/uri'\n docsite = Addressable::URI.parse(project_module::DOCSITE)\n\n # provide uploading capability to websites hosted on RubyForge\n if docsite.host.include? '.rubyforge.org'\n target = \"#{pub_forge.userconfig['username']}@rubyforge.org:#{File.join '/var/www/gforge-projects', options[:rubyforge_project], docsite.path}\"\n end\n end\n\n if target\n cmd = ['rsync', '-auvz', 'doc/', \"#{target}/\"]\n cmd.push '--delete' if options[:upload_delete]\n cmd.concat options[:upload_options]\n\n p cmd\n sh(*cmd)\n end\n end\n\n # announcement\n desc 'Publish all announcements.'\n task 'pub:ann' => %w[ pub:ann:forge pub:ann:raa pub:ann:talk ]\n\n # login information\n ann_logins_file = options[:logins_file]\n ann_logins = nil\n\n task :ann_logins do\n ann_logins = begin\n require 'yaml'\n YAML.load_file ann_logins_file\n rescue => e\n warn \"Could not read login information from #{ann_logins_file.inspect}:\"\n warn e\n warn \"** You will NOT be able to publish release announcements! **\"\n {}\n end\n end\n\n desc 'Announce to RubyForge news.'\n task 'pub:ann:forge' => :pub_forge do\n project = options[:rubyforge_project]\n\n if group_id = pub_forge.autoconfig['group_ids'][project]\n # check if this release was already announced\n require 'mechanize'\n www = WWW::Mechanize.new\n page = www.get \"http://rubyforge.org/news/?group_id=#{group_id}\"\n\n posts = (page/'//a[starts-with(./@href, \"/forum/forum.php?forum_id=\")]/text()').map {|e| e.to_s.gsub(\"\\302\\240\", '').strip }\n\n already_announced = posts.include? ann_subject\n\n if already_announced\n warn \"This release was already announced to RubyForge news, so I will NOT announce it there again.\"\n else\n # make the announcement\n Rake::Task[:ann_text].invoke\n pub_forge.post_news project, ann_subject, ann_text\n\n puts \"Successfully announced to RubyForge news:\"\n puts page.uri\n end\n else\n raise \"Could not determine the group_id of the #{project.inspect} RubyForge project. Run \`rubyforge config\` and try again.\"\n end\n end\n\n desc 'Announce to ruby-talk mailing list.'\n task 'pub:ann:talk' => :ann_logins do\n host = 'http://ruby-forum.com'\n ruby_talk = 4 # ruby-talk forum ID\n\n require 'mechanize'\n www = WWW::Mechanize.new\n\n # check if this release was already announced\n already_announced =\n begin\n page = www.get \"#{host}/forum/#{ruby_talk}\", :filter => %{\"#{ann_subject}\"}\n\n posts = (page/'//div[@class=\"forum\"]//a[starts-with(./@href, \"/topic/\")]/text()').map {|e| e.to_s.strip }\n posts.include? ann_subject\n rescue\n false\n end\n\n if already_announced\n warn \"This release was already announced to the ruby-talk mailing list, so I will NOT announce it there again.\"\n else\n # log in to RubyForum\n page = www.get \"#{host}/user/login\"\n form = page.forms.first\n\n if login = ann_logins['www.ruby-forum.com']\n form['name'] = login['user']\n form['password'] = login['pass']\n end\n\n page = form.click_button # use the first submit button\n\n if (page/'//a[@href=\"/user/logout\"]').empty?\n warn \"Could not log in to RubyForum using the login information in #{ann_logins_file.inspect}, so I can NOT announce this release to the ruby-talk mailing list.\"\n else\n # make the announcement\n page = www.get \"#{host}/topic/new?forum_id=#{ruby_talk}\"\n form = page.forms.first\n\n Rake::Task[:ann_text].invoke\n form['post[subject]'] = ann_subject\n form['post[text]'] = ann_text\n\n form.checkboxes.first.check # enable email notification\n page = form.submit\n\n errors = [page/'//div[@class=\"error\"]/text()'].flatten\n if errors.empty?\n puts \"Successfully announced to ruby-talk mailing list:\"\n puts page.uri\n else\n warn \"Could not announce to ruby-talk mailing list:\"\n warn errors.join(\"\\n\")\n end\n end\n end\n end\n\n desc 'Announce to RAA (Ruby Application Archive).'\n task 'pub:ann:raa' => :ann_logins do\n show_page_error = lambda do |page, message|\n warn \"#{message}, so I can NOT announce this release to RAA:\"\n warn \"#{(page/'h2').text} -- #{(page/'p').first.text.strip}\"\n end\n\n resource = \"#{options[:raa_project].inspect} project entry on RAA\"\n\n require 'mechanize'\n www = WWW::Mechanize.new\n page = www.get \"http://raa.ruby-lang.org/update.rhtml?name=#{options[:raa_project]}\"\n\n if form = page.forms[1]\n resource << \" (owned by #{form.owner.inspect})\"\n\n Rake::Task[:ann_nfo_text].invoke\n form['description'] = ann_nfo_text\n form['description_style'] = 'Pre-formatted'\n form['short_description'] = project_module::TAGLINE\n form['version'] = project_module::VERSION\n form['url'] = project_module::WEBSITE\n form['pass'] = ann_logins['raa.ruby-lang.org']['pass']\n\n page = form.submit\n\n if page.title =~ /error/i\n show_page_error[page, \"Could not update #{resource}\"]\n else\n puts \"Successfully announced to RAA (Ruby Application Archive).\"\n end\n else\n show_page_error[page, \"Could not access #{resource}\"]\n end\n end\n\n # release packages\n desc 'Publish release packages to RubyForge.'\n task 'pub:pak' => :pub_forge do\n # check if this release was already published\n version = project_module::VERSION\n packages = pub_forge.autoconfig['release_ids'][pub_forge_section]\n\n if packages and packages.key? version\n warn \"The release packages were already published, so I will NOT publish them again.\"\n else\n # create the FRS package section\n unless pub_forge.autoconfig['package_ids'].key? pub_forge_section\n pub_forge.create_package pub_forge_project, pub_forge_section\n end\n\n # publish the package to the section\n uploader = lambda do |command, *files|\n pub_forge.__send__ command, pub_forge_project, pub_forge_section, version, *files\n end\n\n Rake::Task[:pak].invoke\n packages = Dir['pkg/*.[a-z]*']\n\n unless packages.empty?\n # NOTE: use the 'add_release' command ONLY for the first\n # file because it creates a new sub-section on the\n # RubyForge download page; we do not want one package\n # per sub-section on the RubyForge download page!\n #\n uploader[:add_release, packages.shift]\n\n unless packages.empty?\n uploader[:add_file, *packages]\n end\n\n puts \"Successfully published release packages to RubyForge.\"\n end\n end\n end\n end", "def show_readme\n readme 'lib/generators/cms/fortress/templates/README'\n end", "def classScalaDoc(docs)\n return <<CLASS_SCALADOC\n/**\n#{ScalaLexer.scalaDocs(docs)}\n */\nCLASS_SCALADOC\n end", "def docs_to_write; end" ]
[ "0.745012", "0.74387044", "0.68841517", "0.6852393", "0.674926", "0.6658012", "0.66527605", "0.65105695", "0.6449596", "0.6446869", "0.643214", "0.6293453", "0.6268048", "0.6184708", "0.6162877", "0.60477924", "0.60397905", "0.60304904", "0.58881825", "0.5866949", "0.5865069", "0.5857377", "0.58157563", "0.5798622", "0.57887834", "0.5777445", "0.5758407", "0.5755516", "0.57282007", "0.5727178", "0.57233", "0.5711902", "0.57093954", "0.56890696", "0.56812024", "0.5670655", "0.5652457", "0.5637219", "0.55945885", "0.55933464", "0.5577714", "0.557505", "0.55693614", "0.555896", "0.55536586", "0.55536586", "0.55423456", "0.55319816", "0.5524291", "0.55237794", "0.551036", "0.551036", "0.5478669", "0.5474105", "0.547056", "0.547056", "0.5442436", "0.54184264", "0.541529", "0.54066396", "0.537137", "0.53605825", "0.53575456", "0.5351428", "0.53424495", "0.5338805", "0.53327703", "0.5327182", "0.5317144", "0.5314202", "0.5314202", "0.5314202", "0.5314202", "0.53103787", "0.5291875", "0.5289772", "0.52718055", "0.5266599", "0.5264706", "0.52477103", "0.52447426", "0.5244382", "0.5223771", "0.5221245", "0.5219621", "0.52023596", "0.51961803", "0.5192837", "0.5187109", "0.5184946", "0.5183981", "0.5183981", "0.51764816", "0.5162113", "0.51600987", "0.51584905", "0.5147833", "0.5144927", "0.5130793", "0.5130073" ]
0.62405163
13
prompt the user to repeat back the new sequence that was just shown to them, one color at a time. The game is over if the guess is incorrect.
def require_sequence
  puts "Repeat Sequence (Only repeat the first letter of each color):"
  seq.length.times do |i|
    print "> "
    input = gets.chomp
    if input != seq[i]
      self.game_over = true
      break
    end
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_game \n\n @number_guesses = 0\n @start_time = Time.now\n puts \"I have generated a secret code, a sequence of four colors: (R)ed, (G)reen, (B)lue, and (Y)ellow\"\n\n game_over = false \n\n until game_over \n \n guess = get_guess\n\n exit_game if guess.upcase == \"Q\" \n guess = @secret_code if guess.upcase == \"C\" # provide option to cheat\n\n if validate_guess(guess)\n @number_guesses += 1\n # add_guess_to_history(guess_report(guess))\n display_history(guess)\n\n if guess.upcase == @secret_code\n game_over = true \n end_game\n else\n puts \"Please guess again.\" \n end\n end\n end\n end", "def ask_for_guess\n @guess_count += 1\n @guess = get_code(\"Please enter your guess in the format 'color color color color'(to list available color options enter 'options'):\")\n end", "def enter_colors\n speak\n guess\n end", "def play\n puts \"I have generated a beginner sequence with four elements made up of: (r)ed,\n (g)reen, (b)lue, and (y)ellow. Use (q)uit at any time to end the game.\n What's your guess?\"\n mastermind = Game.new\n mastermind.game_solution\n amount_of_guesses = 0\n guess = gets.chomp\n user_guess = []\n user_guess << guess.chars\n amount_of_guesses += 1\n mastermind.check_for_correct_letters(user_guess)\n mastermind.check_for_correct_indexes(user_guess, mastermind.solution)\n until guess == \"q\"\n if guess == \"c\"\n puts \"The solution is #{mastermind.solution}\"\n break\n elsif guess.length > 4\n puts \"Your guess was too long.\"\n elsif guess.length < 4\n puts \"Your guess was too short.\"\n elsif guess != mastermind.solution\n puts \"You had #{mastermind.correct_letters} correct colors with #{mastermind.correct_indexes}\n in the correct position.\"\n break\n elsif guess == mastermind.solution.to_s\n \"You won! You guessed #{guess}. You had #{mastermind.correct_letters} correct\n colors in #{mastermind.correct_indexes} correct positions.\n You guessed #{amount_of_guesses.to_s} times.\"\n break\n else\n puts \"Goodbye, quitter.\"\n end\n end\nend", "def askGuess\n valid = false\n while (!valid)\n puts \"The choices are: #{'red'.bright_red} #{'yellow'.bright_yellow} #{'green'.green} #{'blue'.cyan} #{'orange'.red} #{'purple'.magenta};\\n\n Please enter your guess for the first color:\"\n c1 = gets.chomp \n puts \"Please enter your guess for the second color:\"\n c2 = gets.chomp\n puts \"Please enter your guess for the third color:\"\n c3 = gets.chomp\n puts \"Please enter your guess for the fourth color:\"\n c4 = gets.chomp\n if (valid?(c1) && valid?(c2) && valid?(c3) && valid?(c4))\n valid = true\n else \n puts \"\\nThere was a problem with your input. Please try again with correct input\\n\"\n end\n end\n @guess = [c1, c2, c3, c4]\n puts \"your guess was #{@guess.to_s}\"\n end", "def make_guess\n\t\tget_colors_from_player MAKE_GUESS_PROMPT\n\tend", "def get_guess\n @user_guess = \"\"\n if @count >= 5\n abort(\"You guessed 5 times incorrectly. 
You Lose\")\n else\n while guess_letters_check\n print \"You must guess exactly 4 colors (R,Y,B,P,O,G).\\nGuess here:\"\n @user_guess = gets.chomp.upcase\n end\n end\n @count += 1\n check_guess(@user_guess)\n end", "def player_guess\n print 'Red = 1, '.colorize(:red), 'Green = 2, '.colorize(:green), 'Blue = 3, '.colorize(:blue),\n 'Magenta = 4, '.colorize(:magenta), 'Cyan = 5, '.colorize(:cyan), \"Yellow = 6\\n\".colorize(:light_yellow)\n puts\n puts \"Select four colours or enter 's' to show previous guesses.\"\n colour_selection = gets.chomp\n if colour_selection == 's'\n show_previous_guesses\n player_guess\n elsif colour_selection.length == 4 && !colour_selection.match?(/[^123456]/)\n store_colour_selection(colour_selection)\n else\n puts 'Input four numbers (1-6)'\n player_guess\n end\n end", "def play_game\n\t\twhile @turn < 13\n\t\t\tputs \"Lets see if you figured out my code!\"\n\t\t\tputs \"Please select four colors as your guess. They can either mix and match or all be the same\"\n\t\t\tputs \"No spaces please!\"\n\t\t\tputs \"Your choices are 'R', 'G', 'B', 'Y', 'H', 'P'.\"\n\t\t\tguess = gets.chomp.upcase\n\t\t\tfeedback = compare(guess)\n\t\t\tif feedback == [\"O\", \"O\", \"O\", \"O\"]\n\t\t\t\tputs \"~~~~~~~~~~~\"\n\t\t\t\tputs \"You won!!!!\"\n\t\t\t\tputs \"~~~~~~~~~~~\"\n\t\t\t\tputs \"You have cracked the code of #{@master_code}\"\n\t\t\t\texit\n\t\t\telse\n\t\t\t\tputs \"Sorry! Guess again\"\n\t\t\t\tputs \"Here is currently what you have right #{feedback}\"\n\t\t\t\tputs \"---------------\"\n\t\t\t\tputs \"---------------\"\n\t\t\t\t@turn += 1\n\t\t\t\tputs \"That was turn number \" + @turn.to_s\n\t\t\t\tplay_game\n\t\t\tend\n\t\t\tputs \"You reached your max of 12 turns....game over!\"\n\t\tend\n\tend", "def expect_guess\n print \"Guess ##{turn}. What's your code? \"\n input = gets.chomp\n throw :quit if input == \"q\"\n \n if valid_input? input\n self.last_guess = Code.parse(input)\n puts \"Your guess was #{last_guess}.\"\n else\n puts \"Please enter 4 valid colors.\"\n expect_guess\n end\n end", "def reset_for_new_round(role)\n if role == 'code breaker'\n @@player_key = []\n puts \"You have #{12 - @@turn_counter} turns left.\"\n else\n @computer_key = []\n keep_color_matches = []\n answer = ''\n puts \"The computer has #{12 - @@turn_counter} turns left.\"\n puts \"Type 'go' to play the next round\"\n answer = gets.chomp.downcase.strip until answer == 'go'\n end\n @@direct_matches = []\n @@color_matches = []\n end", "def guess_code\n puts \"\\nYou'll have 12 turns to guess the color code set by me.\\nNot So Good Luck jk\\nOptions: r,b,y,g,o,w,v\"\n catch(:guessed) do\n guess = ''\n (1..12).each do |turn|\n print \"\\nGuess #{turn}: \"\n #print \"#{get_code} \"\n guess = gets.chomp!.downcase\n response = ''\n gc = get_code.split('')\n g = guess.split('')\n for i in 0..3 do \n if g[i]==gc[i]\n response += 'X'\n elsif gc.include?(g[i]) \n response += 'O'\n end\n end\n puts \"Response: #{response.split('').shuffle.join('')}\"\n if response=='XXXX'\n puts \"\\nCongratulations! I Lost\\nThe code was #{get_code}\"\n throw :guessed\n elsif response!='XXXX' and turn==12\n puts \"\\nBoohoo! 
I won\\nTHe code was #{get_code}\"\n throw :guessed\n end\n end\n end\n end", "def guess_code\n puts \"\\nI have 12 turns to guess the color code that you have set.\\nLet's see whether I can do it or not\"\n catch(:guessed) do\n (1..12).each do |turn|\n sleep(rand(1..8))\n print \"\\nMy guess #{turn}: \"\n guess = @role.random_code\n puts guess\n response = validate_guess(guess.split(''), get_code.split(''))\n puts \"Response: #{response}\"\n validate_response(response, turn)\n end\n end \n end", "def play_as_codebreaker\n # answer = colors.sample(4)\n loop do\n display.tables(hint.table,decode.table)\n puts \"Please enter your guess #{colors.join(' | ')}:\"\n player.input = gets.split.map(&:capitalize)\n if player.valid_input?\n if player.win?(answer)\n puts \"You won! #{display_answer}\"\n exit\n elsif player.lose?\n puts \"You lost! #{display_answer}\"\n exit\n else\n complete_turn\n end\n else\n puts \"Not a valid input!\"\n end\n end\n end", "def play\n winner = false\n while(!winner)\n puts \"please input a row of four colors\"\n puts \"your options are B: for black, b: for blue\"\n puts \"g: green, y: yellow, r: red , w: white\"\n @grid.board\n input = []\n (4).times { input << gets.chomp}\n #make sure input is a game color\n if((input - @colors).empty?)\n @grid.input(input, @turns)\n else\n \"Please enter an option from the menu below\"\n play\n end\n @turns += 1\n end\n end", "def comp_play\n master_list = create_list\n puts \"Enter the code: \"\n @code = get_input\n puts \"Code: #{@code}\"\n\n guess_count = 1\n matches = 0\n\n rand1 = rand(6)\n rand2 = rand(6)\n until rand2 != rand1\n rand2 = rand(6)\n end\n input = [@@COLORS[rand1], @@COLORS[rand1], @@COLORS[rand2], @@COLORS[rand2]]\n\n until matches == 4 or guess_count > @@MAX_GUESSES\n puts \"Turn #{guess_count}\"\n puts \"Computer guesses #{input}\"\n\n results = check_guess(input, @code)\n matches = results[0]\n\n puts \"Black pins: #{results[0]} White pins: #{results[1]}\"\n guess_count += 1\n\n master_list = prune_list(master_list, input, results)\n\n input = master_list.sample\n end\n\n if matches == 4\n puts \"You lose\"\n else\n puts \"You win\"\n end\n end", "def cycle\n gameover = false\n #keep going until all letters are guessed or they're out of mistakes\n until gameover\n gameover = turn\n end\n end", "def repeat_guess(guess, str)\n if @guess != @str[-1]\n print \"Repeat. 
Please guess different letter.\"\n return\n end\nend", "def game_play\n until game_over\n graphic\n guess\n end\n end", "def take_turn\n puts @master.show_board\n puts \"\\nPick your colors (one by one):\"\n puts Mastermind.show_colors.join(\", \")\n\n @master.guess(@codebreaker.enter_colors)\n @robot.get_the_score(@master.score) if @codebreaker == @robot\n end", "def display\n puts \" \"*35 + \"===MasterMind!=== right: guess position\"\n for col in 0..9\n print \" \"*40\n for row in 0..3\n print guess_history[col][row] + ' '\n end\n puts \" \"*15 + guess_result_history[col][0] + \" \" + guess_result_history[col][1]\n puts \"\\n\"\n end\n puts \" \"*20 + \"input 4 color initials for making your choice, 'h' for help, 'q' for quit this round\"\n end", "def show_previous_guesses\n puts 'previous guesses: '\n @guess_switch = 1\n @guesses.each do |array|\n guess_as_circle(array.split)\n end\n @colors_as_circles = @colors_as_circles.each_slice(4).to_a\n @colors_as_circles.each do |a, b, c, d|\n print \"#{a} #{b} #{c} #{d}\\n\"\n end\n @guess_switch = 0\n end", "def give_hint(guess)\n\n @correct_color = 0\n\n @correct_color_and_position = 0\n\n @colors = {}\n\n puts @colors.inspect\n\n for i in 0..3\n\n if guess[i] == @code[i]\n\n @correct_color_and_position += 1\n\n @colors.key?(\"#{guess[i]}\") ? @colors[guess[i]] += 1 : @colors[guess[i]] = 1\n\n puts \"both\"\n\n else \n\n @colors.key?(\"#{guess[i]}\") ? @colors[guess[i]] += 1 : @colors[guess[i]] = 1\n\n end\n \n puts @original.inspect, @colors.inspect\n\n end\n\n @original.each do |k, v| \n\n if @original.key?(k) && @colors.key?(k)\n \n if @colors[k] == @original[k]\n\n @correct_color += @colors[k]\n\n puts \"added #{k}\"\n\n else\n\n if @original[k] > @colors[k]\n\n @correct_color += @colors[k]\n\n puts \"added #{k}\"\n\n else\n\n @correct_color += @original[k]\n \n end\n\n end\n\n end\n\n end\n\n puts @colors.inspect\n\n @last_guess = guess\n\n puts \"You have #{@correct_color_and_position} in the correct position and color, and #{@correct_color} correct colors.\\n\\n\"\n\n end", "def play_again_prompt\n typing_tabbed(\"txt/play_again.txt\")\n tab; print \" >> \"\n play_again = gets.chomp.downcase\n\n if play_again == 'y'\n Mastermind.new.play\n elsif play_again == \"n\"\n tab; \"Acknowledged\\n\".typing\n tab; \"--THIS DEVICE WILL SELF DESTRUCT IN 3 SECONDS--\\n\".typing\n tab; (1..3).reverse_each { |n| \"#{n}..\".typing ; sleep 0.9}\n line\n\n abort\n else\n play_again_prompt\n end\n end", "def play_game\r\n\r\n #Call on the generate_number method in order to get a random number\r\n number = generate_number\r\n $noOfGuesses = 0 \r\n\r\n #Loop until the player inputs a valid answer\r\n loop do\r\n \r\n Console_Screen.cls #Clear the display area\r\n \r\n \r\n\r\n if $noOfGuesses > $maxGameGuesses then\r\n print \"You exceeded the allowed number of guesses of \" + $maxGameGuesses.to_s + \".\"\r\n print \"\\nYou lose! Please try again.\"\r\n print \"\\n\\nPress enter to continue.\"\r\n Console_Screen.pause \r\n break\r\n end\r\n\r\n if $cheatMode == true then\r\n print \"\\nShh.... the answer is \" + number.to_s \r\n end\r\n\r\n #Prompt the player to make a guess\r\n print \"\\n\\nEnter your guess and press the Enter key: \"\r\n \r\n \r\n reply = STDIN.gets #Collect the player's answer\r\n reply.chop! #Remove the end of line character\r\n\r\n reply = reply.to_i\r\n\r\n if reply < 1 || reply > $maxChallengeRange then\r\n Console_Screen.cls\r\n print \"\\nInvalid entry. 
Please enter a number between 1 and \" + $maxChallengeRange.to_s\r\n print \"\\n\\nPlease press enter to continue.\"\r\n Console_Screen.pause\r\n redo #Redo the current iteration of the loop\r\n end\r\n \r\n $noOfGuesses = $noOfGuesses + 1\r\n \r\n #Analyze the player's guess to determine if it is correct\r\n if reply == number then #The player's guess was correct\r\n Console_Screen.cls #Clear the display area\r\n print \"You have guessed the number! Press enter to continue.\"\r\n Console_Screen.pause #Pause the game\r\n break #Exit loop\r\n elsif reply < number then #The player's guess was too low\r\n Console_Screen.cls #Clear the display area\r\n print \"Your guess is too low! Press Enter to continue.\\n\\n\"\r\n Console_Screen.pause #Pause the game\r\n elsif reply > number then #The player's guess was too high\r\n Console_Screen.cls #Clear the display area\r\n print \"Your guess is too high! Press Enter to continue.\\n\\n\"\r\n Console_Screen.pause #Pause the game\r\n end\r\n\r\n \r\n \r\n end\r\n\r\n end", "def playerguess\n @guess = []\n puts \"What do you think the first colour is?\"\n @guess[0] = gets.chomp.downcase\n #input_validator(@guess[0])\n puts \"What do you think the second colour is?\"\n @guess[1] = gets.chomp.downcase\n #input_validator(@guess[1])\n puts \"What do you think the third colour is?\"\n @guess[2] = gets.chomp.downcase\n #input_validator(@guess[2])\n puts \"What do you think the fourth colour is?\"\n @guess[3] = gets.chomp.downcase\n #input_validator(@guess[3])\n end", "def guess_again(message = nil)\n\t\t# As long as user hasn't run out of turns...\n\t\tif @turn < 12\n\t\t\t# If there was no error message and it isn't the first turn, show the board\n\t\t\tshow_board if (message == nil && @turn > 0)\n\t\t\tputs \n\t\t\tputs \"Enter your guess. You've used #{@turn} of 12 turns.\"\n\t\t\t# Get user's guess\n\t\t\tguess = gets.chomp\n\t\t\t# Put each digit into an array\n\t\t\tarr = []\n\t\t\tguess.split(\"\").each { |num| arr << num.to_i }\n\t\t\t# Make sure guess was correctly inputted\n\t\t\tsanitize_input(arr)\n\t\t# User ran out of turns, show answer\n\t\telse\n\t\t\tputs \n\t\t\tputs \"Whoops! You ran out of turns\"\n\t\t\tputs \"The correct answer was #{@answer.join}\"\n\t\tend\n\tend", "def enter_row\n guess = gets.chomp.split(\",\")\n guess.each{ |color| color.strip! }\n if guess.length != 4\n return false\n end\n guess.each do |color|\n if !Peg::COLORS.include?(color)\n return false\n end\n end\n return guess\n end", "def pick_solution\n \tputs \"Enter your secret code. Choose four colors\"\n \tenter_row\n end", "def give_feedback(guess, code)\n code_colors = code.colors.dup\n guess_colors = guess.colors.dup\n clues = new_clues\n clues = increment_correct_slot(guess_colors, code_colors, clues)\n clues = increment_correct_color(guess_colors, code_colors, clues)\n clues\n end", "def run_guessing_game\n\tuser_number =\"\"\n\twhile user_number != \"exit\"\n\t\tcom_number = rand(1..6)\n\t\tputs \"Guess a number between 1 and 6.\"\n\t\tuser_number = gets.chomp\n\t\tif com_number == user_number.to_i\n\t\t\tputs \"You guessed the correct number!\"\n\t\telse \n\t\t\tputs \"The computer guessed #{com_number}.\"\n\t\tend\n\tend\n\texit_guessing_cli\nend", "def guess_progress(user_guess)\n @letter_found = false\n check_letter(user_guess)\n\n @secret_array.each_index do |i|\n if !@letter\n puts \"Invalid input. 
Please input a letter\"\n @letter_found = true\n break\n elsif @secret_array[i] == user_guess \n @display_array[i] = \" #{user_guess} \"\n @letter_found = true\n end\n end\n\n puts \"Sorry. '#{user_guess}' is not in the secret word\" if !@letter_found\n\n if !check_finish\n increment_guess(user_guess)\n end \n\n @display_array\n @is_over\n end", "def play\n puts\n puts \"How good is your memory? Let's play a memory game.\"\n puts \"Match the cards!\"\n puts\n sleep(3)\n until game_over?\n board.render\n #get_pos from the player\n begin \n make_guess(get_player_input)\n rescue => exception\n puts \"Select a card that hasn't been flipped\"\n retry\n end\n \n end\n puts \"I guess your memory is pretty good :D\"\n end", "def enter_colors\n colors = []\n until colors.length == 4\n pick = gets.chomp.downcase\n if input_validation(pick)\n colors << pick\n else\n puts \"Don't know that color, sorry.\"\n end\n end\n puts \"You picked #{colors.join(\", \")}\"\n colors\n end", "def lose\n if @guess_count >= 12\n puts Rainbow(\"You lose!\").color(:red)\n true\n else\n false\n end\n end", "def try_again\n wave_display\n puts \"Here's the word again...\"\n\tputs $current_word.colorize(:color => :black, :background => :green) + \"\\r\"\n sleep($hide_speed)\n player_input_word\nend", "def repeat_game\n sleep(1)\n puts \"\\n-----------------------\"\n puts \"You currently have $#{@wallet} left.\".colorize(:blue)\n sleep(1)\n if @wallet <= 0\n sleep(3)\n cash_out\n else\n puts \"What would you like to do next?\"\n puts \"1) Play Again\"\n puts \"2) Cash Out\"\n print \"> \"\n choice = gets.to_i\n case choice\n when 1\n sleep(1)\n initialize(@player)\n when 2\n puts \"Goodbye!\"\n sleep(1)\n cash_out\n else\n puts \"invalid entry, try again\"\n sleep(1)\n repeat_game\n end \n end \n end", "def guessgame\n prev_guess = 0\n comp_guess = ((rand * 100) + 1).floor\n attempts = 10\n while attempts >= 1\n print \"Guess the number : \"\n user_guess = gets.chomp.to_i\n if user_guess != prev_guess\n if user_guess > comp_guess\n puts 'Guess was higher'\n elsif user_guess < comp_guess\n puts \"Guess was lower\"\n else\n puts \"You are correct, the answer is #{comp_guess} indeed!\"\n break\n end\n attempts -= 1\n prev_guess = user_guess\n else\n puts \"Guess was same as last attempt, try a different number\"\n end\n puts \"You have #{attempts} attempts left\"\n end\nend", "def new_guess(tries)\n\tputs \"You have #{3-tries} more tries:\"\n\tgets\nend", "def player_choice_acquisition\n puts \"Choose between the following colors: #{@colors}\"\n (1..@code_length).map do |choice|\n user_input = nil\n until @colors.include?(user_input)\n puts \"indicate choice N°#{choice}:\"\n user_input = gets.chomp\n end\n user_input\n end\n end", "def choose(player)\n done = false\n until done do\n \n puts \"Player \"+player.to_s+\", choose a row (0-2)\"\n row = gets.chomp.to_i\n redo if invalid?(row)\n \n puts \"Player \"+player.to_s+\", choose a column (0-2)\"\n col = gets.chomp.to_i\n redo if invalid?(col)\n \n if $game_board[row][col]\n puts \"That square [#{row},#{col}] is already used. Try again.\"\n redo\n end \n \n $game_board[row][col] = player\n done = true\n end\nend", "def player_input \n while user_input = gets.chomp \n case user_input\n when \"2\"\n puts \"Try again\".colorize(:yellow)\n puts\n when \"1\"\n system ('clear')\n puts \"You're doing great! 
Keep it up!\".colorize(:white).colorize(:background => :blue)\n puts\n break\n else\n puts \"Please select either 1 or 2\".colorize(:yellow)\n end\n end\nend", "def start_game\n code_guessed = false\n if @player.is_creator?\n @s = %w(r g b y c m).repeated_permutation(4).to_a\n @s[0], @s[7] = @s[7], @s[0]\n puts \"Enter your secret code! Pick 4: (R)ed, (G)reen, (Y)ellow, (B)lue, (M)agenta, and (C)yan:\"\n @code = gets.chomp.downcase.scan(/[rgybmc]/).map {|n| n}\n @code_guess = []\n while !@game_over\n computer_guess\n if @code == @code_guess\n puts \"The computer guessed the code correctly!\"\n puts \"The code was #{@code}\"\n @game_over = true\n else\n results = guess_feedback(@code_guess)\n @s.delete_if {|possible_guess| guess_feedback(possible_guess) == results}\n print_board\n @turns -=1\n end\n sleep(1)\n puts \"#{@turns} left for the CPU!\" unless @game_over || @turns == 0\n if @turns == 0\n puts \"The computer ran out of guesses! The code was #{@code}!\"\n @game_over = true\n end\n end\n else\n generate_code\n while !@game_over\n guess\n if @code == @code_guess\n puts \"You guessed correctly!\"\n puts \"The code was #{@code}\"\n @game_over = true\n else\n guess_feedback(@code_guess)\n print_board\n @turns -=1\n end\n puts \"#{@turns} guesses left!\" unless @game_over || @turns == 0\n if @turns == 0\n puts \"You ran out of guesses! The code was #{@code}!\"\n @game_over = true\n end\n end\n end\n end", "def guessingGame()\n\t# Track number of tries\n\ttries = 1;\n\n\t# Generate a random number to start the game off.\n\tsecretNumber = rand(0..100)\n\n\tputs \"Guess a number between 1 and 100\"\n\n\twhile true\n\t\tputs \"Enter 'q' to end the game and see the secret number\"\n\t\tguess = gets.chomp()\n\n\t\tif guess.to_i == secretNumber\n\t\t\tputs \"Bingo Bango! The secret number was #{guess}\"\n\t\t\tputs \"You got it in #{tries} tries\"\n\t\t\tbreak;\n\t\telsif guess == 'q'\n\t\t\tputs \"The secret number is: #{secretNumber}\"\n\n\t\t\tputs \"Play again? (y/n)\"\n\t\t\treplay = gets.chomp()\n\t\t\t\n\t\t\tunless replay == 'y' || replay == 'n'\n\t\t\t\tputs \"Play again? (y/n)\"\n\t\t\t\treplay = gets.chomp()\n\n\t\t\telse\n\t\t\t\tif replay == 'y'\n\t\t\t\t\tguessingGame()\n\t\t\t\telsif replay == 'n'\n\t\t\t\t\tputs \"Game Over\"\n\t\t\t\t\tbreak;\n\t\t\t\tend\n\t\t\tend\n\t\telsif guess.to_i < secretNumber\n\t\t\tputs \"Guess a higher number\"\n\t\t\ttries += 1\n\t\telsif guess.to_i > secretNumber\n\t\t\tputs \"Guess a lower number\"\n\t\t\ttries += 1\n\t\tend\t\n\tend\nend", "def play_game\n\n\t#call on the generate_number method to get a random number\n number = generate_number\n\n #loop unt the player inputs a valid answer\n loop do \n \tConsole_Screen.cls\n\n \t#prompt the player to make a guess\n \tprint \"\\nEnter your guess and press the Enter key: \"\n\n \treply = STDIN.gets #collect the player's answer\n \treply.chop! #remove the end of line character\n \treply = reply.to_i #conver the player's guess to an integer\n\n \t#validate the player's input only allowing gueses from 1 to 100\n \tif reply == \"c\"\n\n \t\tConsole_Screen.cls\n\n \t\tSQ.display_credits\n \t\tputs $gameCount\n \t\tbreak\n \telsif reply < 1 or reply > 1000 then\n \t\tConsole_Screen.cls\n \t\tprint \"Guesses must be between 1 and 1000. 
Press Enter to continue\"\n\n \t\tConsole_Screen.pause\n \t\tredo #redo the current iteration of the loop\n end\n\n #analyze the players guess to determine if it is correct\n if reply == number then #the player's guess was correct\n\n \tConsole_Screen.cls #clear the display area\n \t$noOfGuesses = $noOfGuesses + 1\n \tprint \"You have guessed the number! Press Enter to continue.\"\n \tConsole_Screen.pause #pause the game\n \tbreak #exit loop\n elsif reply < number then #the player's guess was too low\n\n \tConsole_Screen.cls #clear the display area\n \t$noOfGuesses = $noOfGuesses + 1\n \tprint \"Your guess is too low! Press Enter to continue.\"\n \tConsole_Screen.pause #puse the game\n \n elsif reply > number then\n\n \tConsole_Screen.cls #clear the display are\n \t$noOfGuesses = $noOfGuesses + 1\n \tprint \"Your guess is too high! Press Enter to conitnue.\"\n \tConsole_Screen.pause #pause the game\n\n end\n\nend\nend", "def computer_guess\n if @white_pegs == 4\n @guess = @guess.split.shuffle\n else\n @guess = Pattern::ORBS.sample(4)\n end\n @guess = @guess.join(' ')\n # Ensures that computer does not guess same code twice.\n if @guesses.any? { |i| i[@guess] } == true\n computer_guess\n else\n # Stores each valid guess. \n @guesses.push(@guess)\n guess_as_circle(@guess.split)\n puts @colors_as_circles.join(' ')\n if @guess == @secret_code\n @winner = true\n end\n end\n end", "def play\n puts \"Let's play 'Bulls and Cows'! (if you are stuck enter 'resign' or 'quit')\"\n loop do \n prompt()\n @user_try = gets.chomp()\n if @user_try == 'resign'\n @result = :resign\n puts \"Number was #{@guess_num.join}\"\n break\n end\n if @user_try == 'quit'\n @result = :quit\n puts \"Number was #{@guess_num.join}\"\n break\n end\n @result = test() if correct?\n end\n return @result\n end", "def game_loop(game, word)\n game.update_screen(word)\n\n until game.correct_positions.all? { |position| position == true }\n special_result = game.read_and_validate_input(word)\n break if special_result == 'QUIT'\n\n if special_result == 'SAVE'\n game.save_game(word)\n break\n end\n\n if special_result == 'WORD'\n correct = game.guess_word(word)\n if correct\n puts \"\\n\\nCongratulations! You won!\"\n game.update_screen(word, true)\n break\n else\n puts \"\\nYou were wrong :(\"\n game.update_screen(word)\n next\n end\n end\n\n if special_result == 'GAMEOVER'\n puts \"\\n\\nYour man has fallen to his doom :(\"\n puts \"The Word was #{word}\\n\"\n puts 'Game over..'.red\n break\n end\n\n system('clear')\n game.update_screen(word)\n end\nend", "def guess_again\n puts (\"Would you like to enter another guess? (y/n)\")\n end", "def run_guessing_game\n \n random_num = rand(6) + 1\n guess_num_between_1_6 = \"\"\n user_input = \"exit\" \nend", "def run_guessing_game\n\tguess = \" \"\n\twhile guess != \"exit\" \n\t\tputs \"Guess a number between 1 and 6.\"\n\t\tguess = gets.chomp\n\t\trandom_num = rand(1..6).to_s\n\t\t#binding.pry\n\t\tif guess == random_num\n\t\t\tputs \"You guessed the correct number!\"\t\t\n\t\telse\t\n\t\t\tputs \"The computer guessed #{random_num}.\"\n\t\tend\n\tputs \"Goodbye!\"\nend\nend", "def prompt\n until game_won? || game_tied?\n\n display_board\n puts \"Player #{@current_player.name}, it's your turn!\"\n puts \"Please enter the numbered square where you'd like to player you #{@current_player.mark}\"\n input_square = get_input\n\n until input_square && @board.is_clear?(input_square)\n puts \"Sorry, that's not an option. 
Try again!\"\n input_square = get_input\n end\n\n play(@current_player.mark, input_square)\n switch_player\n\n end\n\n if game_tied?\n display_board\n puts 'GAME TIED!'\n else\n switch_player\n display_board\n puts \"#{@current_player.name.upcase} WINS THE GAME!\"\n end\n end", "def play_game(value)\r\n\r\n #Call on the generate_number method in order to get a random number\r\n number = generate_number \r\n\r\n\tif value == \"c\"\r\n\t\tConsole_Screen.cls\r\n\t\tputs \"\\n\\nCHEAT MODE: secret number is \" + number.to_s \r\n\t\tConsole_Screen.cls\r\n\tend\r\n\t\r\n #Loop until the player inputs a valid answer\r\n loop do\r\n \r\n Console_Screen.cls #Clear the display area\r\n \r\n #Prompt the player to make a guess\r\n print \"\\nEnter your guess and press the Enter key: \"\r\n \r\n reply = STDIN.gets #Collect the player's answer\r\n reply.chop! #Remove the end of line character\r\n reply = reply.to_i #Convert the player's guess to an integer\r\n \r\n\t #Increment Guesses Variable\r\n\t $noOfGuesses += 1\r\n\t\r\n #Validate the player's input only allowing guesses between 1 and 100\r\n if reply < 1 or reply > 1000 then\r\n\t \r\n\t\tConsole_Screen.cls #clear screen\r\n\t\t\r\n\t\tputs \"\\nInvalid input was entered!\"\r\n\t\tprint \"\\n\\Only use numbers from 1 to 1000 for guesses. Press enter to continue.\" \r\n\t\t\r\n\t\tConsole_Screen.pause\r\n\t \r\n redo #Redo the current iteration of the loop\r\n end\r\n \r\n #Analyze the player's guess to determine if it is correct\r\n if reply == number then #The player's guess was correct\r\n Console_Screen.cls #Clear the display area\r\n print \"You have guessed the number! Press enter to continue.\"\r\n Console_Screen.pause #Pause the game\r\n break #Exit loop\r\n elsif reply < number then #The player's guess was too low\r\n Console_Screen.cls #Clear the display area\r\n print \"Your guess is too low! Press Enter to continue.\"\r\n Console_Screen.pause #Pause the game\r\n elsif reply > number then #The player's guess was too high\r\n Console_Screen.cls #Clear the display area\r\n print \"Your guess is too high! Press Enter to continue.\"\r\n Console_Screen.pause #Pause the game\r\n end\r\n \r\n\t\tif $noOfGuesses >= 10\t\t#Max guess attemps (10)\r\n\t\tConsole_Screen.cls \t#clear screen\r\n\t\t\tprint \"Your \" + $noOfGuesses.to_s + \" guesses are at max allowable attempts. 
Press enter to continue.\"\r\n\t\t\tConsole_Screen.pause\t#pause the game\r\n\t\t\tbreak\t#break loop\r\n\t\tend\r\n end\r\n\r\n end", "def choose_colour(board,width,height,count,finished_game_counter,current_score)\n \n #prints the array\n board.each do |row|\n row.each do |column| \n \n #the field is coloured according to the element in the array \n if column == :red\n \n #prints the element as two space characters with a coloured background\n print column = \" \".colorize(:background => :red)\n elsif column == :green\n print column = \" \".colorize(:background => :green)\n elsif column == :blue\n print column = \" \".colorize(:background => :blue)\n elsif column == :yellow\n print column = \" \".colorize(:background => :yellow)\n elsif column == :magenta\n print column = \" \".colorize(:background => :magenta)\n elsif column == :cyan\n print column = \" \".colorize(:background => :cyan)\n end \n end\n puts\n end\n \n #display number of turns\n print \"Number of turns: \"\n turns(count)\n \n #display percentage completed\n print \"Current completion: \"\n completion(board,width,height,count,finished_game_counter,current_score)\n \n #gets user input r,g,y,b,c,m and increases count by one\n print \"Choose a colour: \"\n user_colour = gets.chomp.downcase\n count = count + 1\n \n #the user_colour variable will be set , depending on the user input\n if user_colour == \"r\"\n user_colour = :red\n change_colour(board,user_colour,width,height,count,finished_game_counter,current_score)\n elsif user_colour == \"g\"\n user_colour = :green\n change_colour(board,user_colour,width,height,count,finished_game_counter,current_score)\n elsif user_colour == \"b\"\n user_colour = :blue\n change_colour(board,user_colour,width,height,count,finished_game_counter,current_score)\n elsif user_colour == \"y\"\n user_colour = :yellow\n change_colour(board,user_colour,width,height,count,finished_game_counter,current_score)\n elsif user_colour == \"m\"\n user_colour = :magenta\n change_colour(board,user_colour,width,height,count,finished_game_counter,current_score)\n elsif user_colour == \"c\"\n user_colour = :cyan\n change_colour(board,user_colour,width,height,count,finished_game_counter,current_score)\n \n #if user types q, he will return to the main menu\n elsif user_colour == \"q\"\n main_menu(width,height,current_score,finished_game_counter)\n end\nend", "def repeat_guess(letter)\n\t\tif @attempted_letters.include?(letter)\n\t\t\tputs \"you already guessed that...\"\n\t\t\treturn true\n\t\tend\n\t\t\treturn false\n\tend", "def player_input2 \n while user_input = gets.chomp \n case user_input\n when \"1\"\n puts \"Try again\".colorize(:yellow)\n puts\n when \"2\"\n system ('clear')\n puts \"You're an awesome friend!\".colorize(:black).colorize(:background => :magenta)\n puts\n break\n else\n puts \"Please select either 1 or 2\".colorize(:yellow)\n puts\n end\n end\nend", "def repeat?\n answer = \"\"\n\n until answer.match(/[y|n]/)\n puts \"\\nWould you like to play again? (y/n)\"\n answer = gets.strip.downcase\n puts \"★ Thanks for playing. Come back soon! 
★\" if answer == \"n\"\n end\n answer\nend", "def make_a_guess\n puts \"make a guess between 1 and 4\"\n rand_num = rand(4) + 1\n while gets.chomp.to_i != rand_num\n puts \"try again\"\n end\n puts \"you guess the number!\"\nend", "def nocurses\n while true\n updateboard\n puts outboard\n sleep(0.5)\n answer = gets\n break if answer.chomp == 'e'\n end\n end", "def play_again\n puts '¿Quieres volver a jugar S/N'\n @answer = gets.chomp.downcase\n if @answer == 's'\n @active_player = check_last_winner\n # The game creates a new board and assigns it to the players\n @board = Board.new\n @board.fill_board\n @player1.board(@board)\n @player2.board(@board)\n # Then the game calls the function to star the game again\n startGame\n end\n end", "def guess\n @guess = []\n i = 1\n \n if @game_mode == 1\n\t puts \"Enter your moves.\"\n\t 4.times do |i|\n\t puts \"Position #{i + 1}:\"\n\t\tposition = gets.chomp.to_i\n\t\tvalid = false\n\t\tuntil valid\n\t\t if @guess.include?(position)\n\t\t\tputs \"You already guessed that number. Try again:\"\n\t\t\tposition = gets.chomp.to_i\n\t\t elsif ((position > 6) || (position < 0))\n\t\t\tputs \"Enter a valid number:\"\n\t\t\tposition = gets.chomp.to_i\n\t\t else\n\t\t\t@guess << position\n\t\t\tvalid = true\n\t\t end\n\t\tend \n\t end\n\telse\n\t 4.times do\n\t\tposition = rand(1..6)\n\t\t@guess << position \n\t end\n\tend\n \n end", "def play\n over = false\n\n until over\n display_score\n @board.render\n @guesses = []\n prompt\n if same_card?(@guesses[0], @guesses[1])\n puts \"you got it! Go again!\"\n @guesses[0].reveal_card\n @guesses[1].reveal_card\n @score[current_player] += 1\n else\n @guesses[0].hide\n @guesses[1].hide\n puts \"you suck! Go #{previous_player} is up.\"\n next_player!\n end\n\n\n over = true if board.won?\n end\n end", "def play\n\n @board.render(clear=true)\n until @board.won? \n\n # should call reveal two time\n guess_1 = @board.reveal # gonna get position guess from the user and return the value\n @board.render(clear=true)\n guess_2 = @board.reveal # gonna get position guess from the user and return the value\n @board.render(clear=true)\n \n if guess_1 == guess_2\n puts \"It's a match!\" \n else\n # reset the board - set all card to face_up == false\n puts \"Try Again!\"\n @board.reset \n end\n sleep 3\n \n @board.render(clear=true)\n end\n end", "def play_again?\n print \"\\nDo you wish to play again? Y/N:\\n\"\n input = gets.chomp.downcase\n\n case input\n when \"y\"\n sleep 1\n self.clear()\n hangman = Game.new\n hangman.display()\n hangman.prompt_player()\n when \"n\"\n print \"\\n\\n Goodbye!!! \\n\\n\".send(:yellow).send(:bold)\n else\n self.clear()\n play_again?()\n end\n end", "def guessing_game\n\n if @comp == @ans\n puts \"#{@comp} is equal to #{@ans}. Congrats!\"\n\n else\n while @comp != @ans do\n puts \"#{@comp} - Is my guess high or low.\"\n hint = gets.chomp.downcase\n\n if hint == \"high\"\n @comp = rand(1...@comp)\n guessing_game\n\n elsif hint == \"low\"\n @comp = rand(@comp..25)\n guessing_game\n end\n end\n end\n \nend", "def guess\n puts \"Write down a guess.\"\n @guess_code.make_seq\n @code_seq = guess_code.sequence\n end", "def play_again(name)\n\tplay_choice = play_again_Q\n\tif play_choice == \"Y\"\n\t\tputs \"\\nHigh five, #{name}! Don't you just love this game?\"\n\t\tsecret_num = rand(10) + 1\n\t\tguess = first_guess.to_i\n\t\ttries = 1\n\t\teval_num(secret_num, guess, tries)\n\t\tplay_again(name)\n\telsif play_choice == \"N\"\n\t\tputs \"\\nThank you for playing #{name}! 
Come test your mind again when you're bored.\"\n\tend\nend", "def replay(play_again_choice)\r\n if play_again_choice == 'y' || play_again_choice == 'yes'\r\n puts \"\\n---------Good luck---------\\n\"\r\n sleep(2)\r\n system('cls')\r\n elsif play_again_choice == 'n' || play_again_choice == 'no'\r\n puts \"\\n---------Thanks for playing!---------\\n\"\r\n exit\r\n else\r\n puts \"\\nInvalid response entered. Would You Like to Play again? (Yes/No)\"\r\n play_again_choice = gets.chomp.downcase\r\n replay(play_again_choice)\r\n end\r\nend", "def user_prompt\n verify = false\n until verify == true\n\n puts \"\\n\\nWhat's your guess?\"\n guess = String(gets.chomp).downcase.delete(' ')\n verify = guess.delete('rgby').empty? && guess.length == 4\n is_it_q(guess)\n cheat(guess)\n if guess.length != 4\n puts \"Four characters are required. Please try again:\"\n elsif !guess.delete('rgby').empty?\n puts \"Sorry, only r, g, b and y are acceptable as guesses. Please try again:\"\n\n else\n @user_input = guess\n verify = true\n break\n end\n\n end\nend", "def retry_game\n user_retry = $prompt.select(\"Would you like to try again?\", [\"Yes\", \"No\"])\n puts user_retry\n if user_retry == \"Yes\"\n reset_vars\n start_app\n else\n puts \"Come back to fight another day...\"\n end\nend", "def start_game(states,capitals,correct,incorrect)\n50.times do\n question = rand(0..50)\n puts \"what is the Capitol of \" + states[question]\n answer = gets.chomp\n if answer == capitals[question]\n \t puts \"You ROCKSTAR!\"\n correct += 1\n puts \"You have #{correct} correct answer(s) so far - and #{incorrect} incorrect answer(s)\"\n check_finish(states,capitals,correct,incorrect)\n else\n \t puts \"Learn our map... you UNPATRIOT!\"\n incorrect +=1\n puts \"You have #{incorrect} incorrect answer(s) so far - and #{correct} correct answer(s)\"\n check_finish(states,capitals,correct,incorrect)\n end\n end\nend", "def game_play\n self.new_round # Start new round\n loop do\n puts \"Your Guess:\"\n @input = gets.chomp # Gets user input by making a prompt and capturing the input, chomp method removes the new line which would otherwise be stored in the input string\n if @input != \"/\"\n if @@word.legal?(@input)\n if self.check_guess\n puts \"Correct! You got the answer in #{@guess_count} guesses!\" # Use string interpolation instead of concatenating to include guess count\n\n self.new_round\n puts @current_word\n end\n puts \"You have #{MAX_GUESS - @guess_count} guesses remaining.\\n?????\" # Calculations can be made in string interpolation too\n else\n puts \"That guess doesn't count! 
Your guess can only be 5 characters in length, contain no duplicate letters and only contain letters.\" # Guess isn't a legal word\n end\n self.check_lost # Check if player has exhausted amount of guesses\n else\n exit\n end\n\t\tend\n\tend", "def run_guessing_game\n gaming = true\n\n\n while gaming do\n random = rand(1..6)\n puts \"Guess a number between 1 and 6.\"\n user = gets.chomp\n\n if user == \"exit\"\n puts \"Goodbye!\"\n playing = false\n break\n\n elsif random == user.to_i\n puts \"You guessed the correct number!\"\n\n else\n puts \"The computer guessed #{random}.\"\n\n end\n end\nend", "def verify_guess(array)\n unless array.all?{|x| $colors_strings.include?(x)} && array.count == 4\n puts \"\\nInvalid guess\"\n guess\n end\n end", "def game_round\n phrases = @phrasearr.dup\n @difficulty[3].times do\n clear\n phrase = selector(phrases)\n display_to_user(phrase)\n phrases = deleter(phrase, phrases)\n end\n\n clear\n prompt = selector(@promptarr)\n display_to_user(prompt)\n input = timed_input\n print @cursor.hide\n scorer(checker(input, prompt))\n deleter(prompt, @promptarr)\n\n check_score\n end", "def star_loop\n selection = ''\n while true\n puts \"Type the number of the post that you want to learn about\"\n print \" (or hit return to view all again, you ego-maniac) >> \"\n selection = $stdin.gets.chomp\n break if ['','q','quit','exit','fuckthis'].include?(selection.downcase)\n show(selection)\n end\n display if selection == ''\n end", "def game_over\n puts 'You have run out of guesses! Would you like to play again? (y/n)'\n answer = gets.chomp\n if answer == 'y'\n system 'clear'\n Display.new\n elsif answer == 'n'\n system 'clear'\n system 'exit'\n else\n game_over\n end\n end", "def player_guess\n print \"What four colours do you think? turns:#{@turns}\\n\"\n @current_guess = gets.chomp.split(\" \")\n #check input has been correctly inputed\n if input_correct?(@current_guess) == false\n print \"Incorrect input please try again you still have #{@turns} left\\n\"\n player_guess\n #Does the guess match the hidden code\n elsif correct_guess?(@current_guess)\n print \"Correct the hidden code was: #{@current_guess}\"\n #Has the player run out of turns\n elsif @turns<=0\n print \"Sorry no more turns left the hidden code was: #{@hidden_code}\"\n else\n @turns -= 1\n @guess_array.push(@current_guess.join(\" \").split(\" \"))\n print_board(@current_guess)\n player_guess\n end\nend", "def give_hint()\n \n sleep(2)\n puts \"hint...\"\n sleep(1)\n\n correct_colors = []\n correct_place = 0\n\n #count matching colors\n $computer[\"code\"].map { |value| if $player[\"code\"].include?(value)\n if !correct_colors.include?(value)\n correct_colors.push(value) \n end\n end }\n\n #update object\n $computer[\"correct_colors\"] = correct_colors\n\n #report matching colors \n if correct_colors.length() > 0\n puts \"#{correct_colors.length()} of the colors that the computer chose are accurate...\"\n end \n \n #count matching placement of matching colors\n correct_colors.map { |value| $computer[\"code\"].index(value) == $player[\"code\"].index(value) }.map { |value| if value == true\n correct_place += 1\n end}\n\n #update object\n $computer[\"correct_place\"] = correct_place\n\n puts \"... and #{correct_place} in the correct place\" \n \n sleep(3)\n\n end", "def play_again\n puts (\"Do you want to play another game ? Yes / No\").red\n print \">\"\n decision = gets.chomp\n while decision != \"Yes\" && decision != \"No\"\n puts \"Invalid answer. Do you want to play again ? 
Please, type 'Yes' or 'No'\"\n print \">\"\n decision = gets.chomp\n end\n return decision\n end", "def start_new_game?\n begin\n puts \"Would you like to play again? (Y/N)\"\n continue_answer = gets.chomp.upcase\n end until [\"Y\",\"N\"].include?(continue_answer)\n continue_answer == \"Y\"\n end", "def guessing_for_the_win(letter)\n\t\tif repeat_guess(letter)\n\t\telsif guess_correct(letter)\n\t\telse wrong_guess(letter)\n\t\tend \n\n\t\tif @secret_word == @display\n\t\t\tcongrats\n\t\telsif @number_of_guesses > 0 \n\n\t\t\tp \"Keep guessing\"\n\t\telse\n\t\t\tfailure\n\t\tend\n\tend", "def check_guess(guess)\n\t\tif @guesses.include?(guess)\n\t\t\tputs 'This is a repeated guess. Try another letter!'\t\n\t\telse\n\t\t\t@guesses << guess\n\t\t\tif @secret_word_arr.include?(guess)\n\t\t\t\t# logic to show the guess inside the @dashes_arr\n\t\t\t\t@word_length.times do |i|\n\t\t\t\t\tif @secret_word_arr[i] == guess\n\t\t\t\t\t\t@dashes_arr.delete_at(i)\n\t\t\t\t\t\t@dashes_arr.insert(i, guess)\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\telse\n\t\t\t\t@guess_count += 1\n\t\t\t\tattempts = @word_length - @guess_count\n\t\t\t\tputs \"Incorrect guess. You have #{attempts.to_s} attempts left. Try again!\"\n\t\t\tend\n\t\tend\n\t\t@display_chars\n\tend", "def turn()\n\tguess = \"\"\n\tif @human_is_guessing == true\n\t\tputs \"Enter your guess for the secret code\"\n\t\tguess = gets.chomp\n\t\twhile guess.length != 4 || (guess =~ /[^0-5]/)\n\t\t\tputs \" Please enter guess again. Remember it must be 4 numbers and only consist of values 0-5.\"\n\t\t\tguess = gets.chomp\n\t\tend\n\telse #computer is guessing\n\t\t#random guess from array of viable solutions.\n\t\tguess = @solutions.delete_at(rand(@solutions.length)).join.to_s\n\t\t\tputs \" I guess \" + guess\n\t\tend\n\t\tset_guess(guess)\n\t\t@score = compare_guess(guess)\n\t\tputs \"You have correctly guessed \" + @colors_correct.to_s + \" out of 4 digits in the code\" + \" and have \" + @placed_correct.to_s + \" in the right place.\"\n\t\[email protected]_if {|x| compare_guess(x.join.to_s, @guess) != @score }\n\t\t@turns += 1\n\tend", "def guess\n puts 'Guess a number between 1-100, You only have 5 tries to get it right.'\n\n guess = gets.to_i\n\n if guess == @number\n puts 'WOW!! You guessed it on the first try!'\n else\n keep_guessing guess\n end\n end", "def game_loop\n\t\tputs \"Welcome to the game of RED versus BLACK in a \"\n\t\tputs \"Connecting Four battle of wits and colored tokens\"\n\t\tputs \"\"\n\t\tprint \"#{ @turn %2 == 0 ? \"BLACK\" : \"RED\" } will go first!\\n\\n\"\n\t\n\t\tloop do\n\t\t\tdisplay_board\n\n\t\t\t@turn % 2 == 0 ? player = \"black\" : player = \"red\"\n\t\t\tprint \"#{player.upcase}! Into which column would you like to drop your token?\"\n\n\t\t\tinput = 100\n\t\t\tuntil valid?(input)\n\t\t\t\tinput = gets.chomp.to_i \n\t\t\tend\n\n\t\t\tsite = drop_token(player, input)\n\n\t\t\tif winner?(site)\n\t\t\t\tputs \"\\n\\n\"\n\t\t\t\tdisplay_board\n\t\t\t\tputs \"Congratulations #{player}! You've won!\"\n\t\t\t\texit\n\t\t\tend\n\n\t\t\t@turn += 1\n\t\tend\n\tend", "def guessing_game\n\tprint \"Hello, what should I call you? \"\n\tname = gets.chomp\n\tnumber = rand(100) + 1\n\tguesses_remaining = 10\n\tis_correct = false\n\tputs \"Welcome #{name}! 
It's time to play Guess My Number!\"\n\tputs \"I'm thinking of a number between 1 and 100, can you guess what it is?\"\n\twhile guesses_remaining > 0\n\t\tif guesses_remaining == 1\n\t\t\tputs \"You only have 1 guess remaining!!!\"\n\t\telse\n\t\t\tputs \"You have #{guesses_remaining} guesses remaining.\"\n\t\tend\n\t\tputs \"What do you guess?\"\n\t\tguess = gets.to_i\n\t\tguesses_remaining -= 1\n\t\tif guess == number\n\t\t\tif (10 - guesses_remaining) == 1\n\t\t\t\tputs \"Good job, #{name}! You guessed my number in only 1 guess!!!\"\n\t\t\telse\n\t\t\t\tputs \"Good job, #{name}! You guessed my number in #{10 - guesses_remaining} guesses!\"\n\t\t\tend\n\t\t\tis_correct = true\n\t\t\texit\n\t\telsif guess > number\n\t\t\tputs \"Oops, your guess was too HIGH!\"\n\t\telse\n\t\t\tputs \"Oops, your guess was too LOW!\"\n\t\tend\n\tend\n\n\tunless is_correct\n\t\tputs \"Sorry, you didn't get my number. It was #{number}!\"\n\t\tputs \"Better luck next time!\"\n\t\tplay_again?\n\tend\nend", "def game(boardGame)\n #init local game variables\n completed = 0\n turns = 0\n quit = false\n \n #begin game loop\n begin\n #get old colour from top left and check the games completion \n oldColor = boardGame[0][0]\n completed = completion boardGame, oldColor\n \n #if completed >= 100 then the game will end\n if completed < 100 then\n printGrid boardGame\n puts \"Number of turns: #{turns}\"\n puts \"Current completion: #{completed}%\"\n print \"Choose a colour: \"\n color = gets.chomp\n \n #checks if input is a valid colour.\n #true --> game is updated\n #if q --> quit to menuMain\n #else re-print board and wait for an other input \n if isColor(color.downcase) then\n turns += 1\n setColors boardGame, getColor(color.downcase), oldColor, 0, 0\n system(\"clear\")\n elsif color == \"q\" then\n quit = true\n else\n system(\"clear\")\n end\n else\n quit = true\n end\n end while quit == false\n #update bestGame score\n if completed >= 100 then\n if turns < $bestGame or $bestGame == 0 then\n $bestGame = turns\n end\n puts \"You won after #{turns} turns\"\n end\n #when game closes\n puts \"Thanks for playing\"\n gets\n system(\"clear\") \nend", "def make_guess\n\tputs \"#{ @name } is thinking...\"\n\tif @first_guess==[]\n\t return make_first_guess\n\telse\n\t # update the last guess, get a random sample from the set of available codes\n\t sleep 1\n\t @last_guess = @set_of_codes.sample \n\t return @last_guess\n\tend\n end", "def guess_number_3\n tries = 1\n random_number = rand(10) # rand gives a random number between 0 and x-1\n puts \"Guess a number, any number!\"\n answer = gets.chomp.to_i\n while answer != random_number\n puts \"Please guess again. You have had #{tries} tries so far.\"\n tries += 1\n answer = gets.chomp.to_i\n end\n puts \"You guessed correctly! The random number is #{random_number}.\"\n puts \"It took you #{tries} tries to guess the number correctly.\"\nend", "def try_again\n while true\n print \"Would you like to play again? 
[y/n]: \"\n input = gets.strip\n input.downcase!\n case input\n when 'y', 'yes'\n puts 'Playing again'\n play_maze\n when /\\A[nN]o?\\Z/\n break\n end\n end\nend", "def puzzle_game\n\t\t\tloop do\n\t\t\t\t#Set up deck and hand\n\t\t\t\tclear\n\t\t\t\tget_deck\n\t\t\t\tshuffle\n\t\t\t\tget_hand\n\n\t\t\t\t#Set up a single solution game\n\t\t\t\tsolution = find_set\n\t\t\t\tnext if (solution == [])\n\t\t\t\tsolution.each {|card_in_set| removed_card = @hand.delete(card_in_set); break if (find_set != []); @hand << removed_card}\n\t\t\t\tnext if @hand.length < 12\n\t\t\t\[email protected]!\n\n\t\t\t\t#Display message and ask user for input\n\t\t\t\tloop do\n\t\t\t\t\tshow_hand\n\t\t\t\t\tprint \"\\nEnter your set or type 'quit': \"\n\t\t\t\t\tcase user_input = gets.chomp.downcase.split(\",\")\n\t\t\t\t\twhen [\"quit\"]\n\t\t\t\t\t\tsystem('clear'); system('cls')\n\t\t\t\t\t\treturn\n\t\t\t\t\telse\n\t\t\t\t\t\tif good_set_syntax? user_input\n\t\t\t\t\t\t\t# safe to convert user input\n\t\t\t\t\t\t\tuser_input = user_input.map {|card| card.to_i}.sort\n\t\t\t\t\t\t\t# return user defined set in ascending card order\n\t\t\t\t\t\t\tif (user_input == solution.map {|card| @hand.find_index(card)}.sort)\n\t\t\t\t\t\t\t\tsystem('clear'); system('cls')\n\t\t\t\t\t\t\t\tshow_hand\n\t\t\t\t\t\t\t\tputs \"\\n#{user_input[0]},#{user_input[1]},#{user_input[2]}\\nGreat job! You found the only set.\\nHit enter to go back to main menu.\"\n\t\t\t\t\t\t\t\tgets\n\t\t\t\t\t\t\t\tsystem('clear'); system('cls')\n\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\tend\n\t\t\t\t\t\t\tsystem('clear'); system('cls')\n\t\t\t\t\t\t\tputs \"#{user_input[0]},#{user_input[1]},#{user_input[2]}\\nIncorrect set. There is only one soltuion. Try again.\",\"\"\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\tsystem('clear'); system('cls')\n\t\t\t\t\t\t\tputs \"Invalid command or set syntax.\"\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\t\t\t\tend\n\t\t\t\tputs \"Error in execution.\"\n\t\t\t\tbreak\n\t\t\tend\n\t\tend", "def congratulation(percentage,count,current_score,finished_game_counter,width,height)\n puts \"You won after #{count} turns\"\n puts \"Click enter to continue\"\n again = gets\n if again == \"\\n\"\n main_menu(width,height,current_score,finished_game_counter)\n end\nend", "def start_guess\r\n\t\tuntil @is_over == true\r\n\t#player enters 1 letter string\r\n\r\n\t\t\tputs \"guess a letter you've already guessed #{@used}. #{@tries-@guess_count} attempts remain\"\r\n\t\t\tputs @blanks.join\r\n\t\t\tletter = gets.chomp\r\n\t#if letter is not in guessed box take a turn away\r\n\t\t\tif @used.include?(letter) == false\r\n\t\t\t\t@guess_count += 1\r\n\t\t\t\tif @tries == @guess_count \r\n\t\t\t\t\t@is_over = true\r\n\t\t\t\tend\r\n\t\t#if letter is in the world replace the blank\r\n\t\t\t\tif @word.include?(letter)\r\n\t\t\t\t\twordindex = @word.index(letter)\r\n\t\t\t\t\t@blanks[wordindex] = letter\r\n\t\t\t\t\tif @blanks.include?(\"_ \") == false\r\n\t\t\t\t\t\t@is_over = true\r\n\t\t\t\t\tend\r\n\t\t#if letter is not in the world add to guessed box\r\n\t\t\t\telse\r\n\t\t\t\t\t@used << letter\r\n\t\t\t\tend\r\n\t#if letter is in guessed box don't consume a turn\r\n\t\t\telse\r\n\t\t\t\tputs \"you already tried that letter\"\r\n\t\t\tend\r\n\t\tend\r\n\r\n\t#end:\r\n\t#if word is guessed correctly print congrants\r\n\t\tif @blanks.include?(\"_ \")\r\n\t\t\tputs \"haha try harder next time\"\r\n\t#if word is guessed wrong print taunt\r\n\t\telse\r\n\t\t\tputs \"well done! 
you guessed the word\"\r\n\t\tend\r\n\tend", "def paper_scissors_rock\n clear_screen\n draw_shovel_line\n\tputs \"\\n Play with the Sphinx's stomach to get out!\\n\"\n sleep(1)\n\tresult = rand(3).floor\n\task_user \"\\n What have you got?:\" , \" \\n\\n 1| for paper \\u{1F4C3}\\n 2| for scissors \\u{2702}\\n 3| for rock \\u{270A}\\n\\n\"\n\tsleep(0.5)\n puts \" \\u{1F449}\"\n choice = gets.chomp.to_i\n\n\tif choice == result\n\t\tsay \"\\nIt's a tie!\\n\"\n sleep(1.5)\n\t\tpaper_scissors_rock\n\telsif choice < result\n\t\tsay \"\\nSorry, you lost\\n Try again!\"\n sleep(1.5)\n\t\tpaper_scissors_rock\n\telsif choice > result\n\t\tputs \"\\n You won! Get out!!!!\\n\"\n sleep(1.5)\n draw_shovel_line\n sleep(1.5)\n\n\tend\nend", "def get_guess\n print \"\\nMake a guess: \"\n guess = gets.chomp.upcase\n\n #validate that guess has not already been guessed and is not blank\n while @bad_guesses.include?(guess) || @user_progress.include?(guess) || guess == \"\"\n puts \"You already guessed that!\" unless guess == \"\"\n print \"Guess again: \"\n guess = gets.chomp.upcase\n end\n return guess\n end", "def instruction_message\n puts \"\\nMASTERMIND is a color guessing game where the computer generates a \\\nrandom string of four characters representing the base colors #{\"(r)ed\".red}, \\\n#{\"(g)reen\".green}, #{\"(b)lue\".blue}, and/or #{\"(y)ellow\".yellow}. \\\nThe intermediate difficulty level is six characters and adds \\\n#{\"(m)agenta\".magenta} and the advanced difficulty level is eight characters \\\nand adds #{\"(c)yan\".cyan}. \\\nThe string is only guaranteed to contain one color. The player must submit \\\nguesses to try to find the generated combination. Guesses are not case sensitive.\"\n\n puts \"\\nEnter #{\"(p)lay\".green}, #{\"(i)nstructions\".yellow} or #{\"(q)uit\".red}\"\n end", "def again\n puts \"Do you want to try it again?(yes/no)\"\n input = gets.chomp.upcase\n if input == \"YES\"\n menu\n elsif input == \"NO\"\n `say \"Thanks for using the awesome \"PICK A NUMBER\" program!\"`\n abort('Bye')\n else \n puts \"Excuse me, I did not understand you. Can you put yes or no? \\n \"\n again\n end\nend", "def make_guess\n puts \"Make a guess:\"\n @current_guess = gets.chomp\n unless good_guess?\n puts \"That is an invalid guess, please try again!\"\n @current_guess = gets.chomp\n end\n puts\n guesses << current_guess unless current_guess == \"save\" || current_guess == secret_word\n end", "def guessing_game\n\tputs \"Guess a number between 1 and 100\"\n\tcorrect = Random.new.rand(1..100)\n\tnum_guesses = 1\n\tcurrent_guess = gets.chomp.to_i\n\n\twhile current_guess != correct\n\t\tif current_guess > correct \n\t\t\tputs \"The number is lower than #{current_guess}. Guess again\"\n\t\telsif current_guess < correct\n\t\t\tputs \"The number is higher than #{current_guess}. Guess again\"\n\t\tend\n\t\tcurrent_guess = gets.chomp.to_i\n\t\tnum_guesses = num_guesses + 1\n\tend\n\tputs \"You guessed #{correct} in #{num_guesses} tries!\"\nend" ]
[ "0.7354492", "0.72172844", "0.71080095", "0.70302904", "0.7019733", "0.69870675", "0.68975174", "0.6859837", "0.6847271", "0.6798628", "0.6536197", "0.6531533", "0.65227264", "0.6501217", "0.64770395", "0.64654136", "0.64632547", "0.6428728", "0.63727206", "0.6339941", "0.63255894", "0.6321913", "0.6263707", "0.6261428", "0.6253161", "0.6233616", "0.62306094", "0.6229593", "0.62104744", "0.6204569", "0.61916876", "0.61785895", "0.6177623", "0.6161912", "0.6158938", "0.6152193", "0.6130364", "0.6129744", "0.6127324", "0.6118807", "0.61064464", "0.6104653", "0.6102616", "0.609724", "0.6091995", "0.60774076", "0.6076463", "0.6075641", "0.607544", "0.6071408", "0.60694855", "0.6062843", "0.6050048", "0.6047772", "0.6024549", "0.6022296", "0.601704", "0.60113263", "0.60055876", "0.60019934", "0.59798616", "0.5977096", "0.59763336", "0.5974905", "0.5970305", "0.59693515", "0.5966186", "0.5962624", "0.5961431", "0.59568036", "0.5956129", "0.5954936", "0.5948856", "0.5948248", "0.59470487", "0.59400904", "0.5938934", "0.5933633", "0.5930153", "0.59286344", "0.592569", "0.59238464", "0.5922507", "0.59218705", "0.5920737", "0.59155154", "0.59094137", "0.5909134", "0.5903554", "0.58964604", "0.5895987", "0.58899724", "0.5888245", "0.58811986", "0.5876554", "0.5874846", "0.5871538", "0.5869984", "0.58654326", "0.5864676" ]
0.7541607
0
Adds a random color to the sequence
def add_random_color seq << COLORS.sample end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_color\n if !color\n self.color = %w(\n #000000 #0000FF #00FF00 #FF0000 #FFFF00 #9900CC\n #CC0066 #00FFFF #FF00FF #C0C0C0 #00008B #FFD700\n #FFA500 #FF1493 #FF00FF #F0FFFF #EE82EE #D2691E\n #C0C0C0 #A52A2A #9ACD32 #9400D3 #8B008B #8B0000\n #87CEEB #808080 #800080 #008B8B #006400\n ).sample\n end\n end", "def random_color\n self.color = [\"#80b891\", \"#f89f81\", \"#586576\", \"#f0d2a8\"].sample\n end", "def random_color\n color = \"#%06x\" % (rand * 0xffffff)\n while colors.include?(color)\n color = \"%06x\" % (rand * 0xffffff)\n end\n colors << color\n color\n end", "def generate_random_color\n COLORS[rand(6)]\n end", "def generate_random_color\n gen = ColorGenerator.new saturation: COLOR_SATURATION,\n lightness: COLOR_LIGHTNESS\n self.color = \"##{gen.create_hex}\"\n self\n end", "def rand_color\n Graphic::COLORS[rand(Graphic::COLORS.size - 1)]\n end", "def random_color\n return Color.new(random(COL_DEP), random(COL_DEP), random(COL_DEP), 255)\nend", "def change_color\n @color = Gosu::Color.rgb(rand * 255, rand * 255, rand * 255)\n end", "def set_color\n self.color = [\"#7AD8E5\", \"#63b4d1\", \"6da7d3\", \"#7699d4\", \"#816ec4\", \"#8658bc\", \"#602278\", \"#34023C\"].sample\n end", "def random_colors\n colors = [\"blue\",\"red\",\"green\",\"black\",\"yellow\",\"pink\",\"white\",\"grey\"]\n color = colors[rand(colors.length)]\nend", "def add_colors\n\tNcurses.start_color\n\tcolors = %w[RED BLUE GREEN MAGENTA CYAN YELLOW]\n\tcolors.each { |color|\n\t\teval \"Ncurses.init_color( Ncurses::COLOR_#{color}, #{rand(0..1000)}, #{rand(0..1000)}, #{rand(0..1000)} )\"\n\t}\n\t#Ncurses.init_pair( PAIR_NUMBER, BORDER_LINE_COLOR, BORDER_COLOR)\n\trandom_color = eval \"Ncurses::COLOR_#{colors.sample}\"\n\tNcurses.init_pair(2, random_color, Ncurses::COLOR_RED)\n\tNcurses.init_pair(3, random_color, Ncurses::COLOR_BLUE)\n\tNcurses.init_pair(4, random_color, Ncurses::COLOR_GREEN)\n\tNcurses.init_pair(5, random_color, Ncurses::COLOR_MAGENTA)\n\tNcurses.init_pair(6, random_color, Ncurses::COLOR_CYAN)\n\tNcurses.init_pair(7, random_color, Ncurses::COLOR_YELLOW)\nend", "def color_random\n Color::RGB.new(rand(255),rand(255),rand(255))\n end", "def pick_color\n if @@colors.length > 0\n i = rand(0..@@colors.length-1)\n self.color = @@colors[i]\n @@colors.delete_at(i);\n else\n self.color = 'default'\n end\n end", "def get_random_color\n\t\tletters = '0123456789ABCDEF'.split('');\n\t\tcolor = '#';\n\t\tprng = Random.new\n\t\tfor i in 0...6\n\t\t\tprng.seed\n\t\t\tcolor += letters[(prng.rand * 15).round];\n\t\tend\n\t\tcolor\n\tend", "def gen_color()\n random = Random.new\n\n colorname = Array.new(9)\n colorname = [ 'red', 'blue', 'lightblue', 'pink', 'gold', 'white', 'yellow', 'green', 'lightgreen', 'orange', 'grey', 'black' ]\n\n return colorname[random.rand(9)]\nend", "def select_random_code\n code = \"\"\n 4.times { code += @colors.sample }\n return code\n end", "def random_colors\n color_array = []\n 4.times {color_array.push @colors.sample}\n color_array\n end", "def getRandomColor()\n num = Random.rand(1..6)\n if num == 1 then return :red\n elsif num == 2 then return :blue\n elsif num == 3 then return :green\n elsif num == 4 then return :yellow\n elsif num == 5 then return :cyan\n elsif num == 6 then return :magenta\n end\nend", "def hsla_color\n hsl_color << rand.round(1)\n end", "def random_peg(*allowed_colors)\n allowed_colors[0][rand(0...allowed_colors[0].size)]\n end", "def generate_color\n colors= [\n \"#FF3AAE\",\n \"#B9D61A\",\n \"#952DE1\",\n \"#A4D789\",\n \"#51A3A3\",\n \"#D96C06\",\n 
\"#039be5\"\n ];\n\n @p_colors=[]\n cnt=Flavor.all.length\n colors.cycle(cnt) do |color|\n @p_colors.push(color)\n @cnt=1\n end\n end", "def new_rgb(red: nil, green: nil, blue: nil, alpha: nil)\n range = 0..255\n rgb = [red, green, blue].map { |channel| channel || rand(range) }\n alpha.nil? ? rgb : rgb + [alpha]\n end", "def create_code\n 4.times do\n @code << @colors.sample\n end\n @code\n end", "def add_colors\n $rainbow_colors.push(\"green\",\"blue\")\nend", "def generate_code\n a = []\n 4.times do\n a.push(@@COLORS.sample)\n end\n a\n end", "def random_pattern_with_color( hex_color )\n randomish_string = ('a'..'z').to_a.shuffle.join\n return GeoPattern.generate(randomish_string, color: hex_color )\n end", "def color(color); end", "def <<(color)\n r = [1.0, color.red * color.alpha + self.red].min\n g = [1.0, color.green * color.alpha + self.green].min\n b = [1.0, color.blue * color.alpha + self.blue].min\n a = self.alpha\n UIColor.colorWithRed(r, green:g, blue:b, alpha:a)\n end", "def before_create\n self.colour = random_colour\n end", "def computer_color\n\t valid_colors = [\"r\", \"y\", \"b\", \"w\", \"c\", \"g\"] \n\t return [valid_colors[rand(0..5)],valid_colors[rand(0..5)], valid_colors[rand(0..5)], valid_colors[rand(0..5)]]\n\tend", "def creator_ai_input\n return Colors.sample(4)\n end", "def random_guess\n return Colors.sample(4)\n end", "def color(id)\n if @colors.has_key?(id) \n @colors[id]\n else\n c = \"%06x\" % (rand * 0xffffff)\n if @colors.values.find{|v| v == c}\n # find another color\n color(id)\n else\n @colors[id] = c\n @colors[id]\n end\n end\n end", "def choose_code\r\n @code = []\r\n 4.times do\r\n @code.push($possible_color[(rand(0..5))])\r\n end\r\n @code\r\n end", "def color(*args)\n @instructions << Instruction.new(:color, args)\n self\n end", "def make_sequence\n 4.times do\n self.sequence << COLORS.sample\n end\n end", "def random(with_alpha: false)\n prefix = with_alpha ? 'rgba' : 'rgb'\n values = (1..3).map{ (rand * 255).round }\n values << rand.round(2) if with_alpha\n\n Inker.color(\"#{prefix}(#{values.join(',')})\")\n end", "def random\n Client.get(\"/colors/random\")\n end", "def color_sample(*args, &block)\n send(COLOR_SYMBOLS[0..-2].sample, *args, &block)\n end", "def pick_hair_color\n # colors = ['Blonde', 'Silver', 'Periwinkle',\n # 'Pink', 'Teal', 'Green', 'Light Green',\n # 'Blue', 'Light Blue',\n # 'Red', 'Dark Red']\n\n ## only use \"darker\" colors - why? 
why not?\n colors = [ 'Purple',\n 'Orange', 'Red', 'Dark Red']\n\n colors[ rand( colors.size ) ]\nend", "def as_red\n @red += 1\n end", "def code\n colors = Game::COLORS.shuffle\n print colors\n# empty array that will become the generated code\n generated_code = []\n \n# i think it 4 times will pop off the shuffled colors and add it to the end\n# of the generated code array\n 4.times{ generated_code << colors.pop }\n generated_code\n end", "def random_color\n String\n .colors\n .reject { |color| color == :default }\n .sample\n end", "def create_code\n code = []\n 4.times do\n code << Mastermind.show_colors[rand(0..7)]\n end\n code\n end", "def recycle_colors(color_index)\n #\"or\" used for control flow\n colors[i+1] or 0\n end", "def recycle_colors(color_index)\n #\"or\" used for control flow\n colors[i+1] or 0\n end", "def setup_random\n @blocks = Array.new(ROWS) do\n Array.new(COLUMNS) { rand(COLOR_TABLE.size) }\n end\n end", "def as_blue\n @blue += 1\n end", "def randomize_icon_color\n self.icon = \"a\".concat(rand(0..9).to_s)\n end", "def generate_secret_code\n code = Array.new(4)\n code.map! do |code| \n code = generate_random_color\n end\n end", "def polya_urn_model(base_color_distribution, num_balls, alpha)\n return [] if num_balls <= 0\n\n balls_in_urn = []\n 0.upto(num_balls - 1) do |i|\n if rand < alpha.to_f / (alpha + balls_in_urn.size)\n # Draw a new color, put a ball of this color in the urn.\n new_color = base_color_distribution.call\n balls_in_urn << new_color\n else\n # Draw a ball from the urn, add another ball of the same color.\n ball = balls_in_urn[rand(balls_in_urn.size)]\n balls_in_urn << ball\n end\n end\n \n balls_in_urn\nend", "def random(string)\n random_key = @colours.keys.sample\n\n self.send(random_key, string)\n end", "def create_code\n speak\n code = []\n 4.times do\n code << Mastermind.show_colors[rand(0..7)]\n end\n code\n end", "def rainbow\n (0..256).each{ |color| \n print Paint[' ',48,5,color] # print empty bg color field\n }\n puts\n end", "def fill_color(color)\n end", "def color=(color)\n set_color(color)\n generate_buffers\n end", "def recolor(color)\n @color = color\n self\n end", "def add_color(colorname)\n @colors << colorname\n end", "def random\n CYCLE * rand\nend", "def +(other_color)\n other_color = Colorist::Color.from(other_color)\n color = self.dup\n color.r += other_color.r\n color.g += other_color.g\n color.b += other_color.b\n color\n end", "def start_hexes(color)\n squares = []\n @size.times do |index|\n if color == :red\n squares << [0, index] if self[[0, index]] == :red\n else\n squares << [index, 0] if self[[index, 0]] == :blue \n end \n end\n squares\n end", "def +(color)\n mix_with(color.uicolor, 0.5)\n end", "def dup\n Colorist::Color.from_rgb(@r,@g,@b)\n end", "def random(background = false)\n (background ? 40 : 30) + rand(8)\n end", "def random(background = false)\n (background ? 
40 : 30) + rand(8)\n end", "def rainbowSpeak1(sentence)\n\trandNum = 0\n\tpreviousRandNum = 0\n\tletterArr = []\n\n\tsentence = sentence.split('')\n\n\tsentence.each do |letter|\n\t\twhile (randNum == previousRandNum)\n\t\t\trandNum = rand(1...7) \n\t\tend\n\n\t\tcase randNum\n\t\twhen 1\n\t\t\tletterArr.push(letter.colorize(:red))\n\t\twhen 2\n\t\t\tletterArr.push(letter.colorize(:green))\n\t\twhen 3\n\t\t\tletterArr.push(letter.colorize(:yellow))\n\t\twhen 4\n\t\t\tletterArr.push(letter.colorize(:blue))\n\t\twhen 5\n\t\t\tletterArr.push(letter.colorize(:magenta))\n\t\twhen 6\n\t\t\tletterArr.push(letter.colorize(:cyan))\n\t\twhen 7\n\t\t\tletterArr.push(letter.colorize(:white))\n\t\tend\n\t\n\t\tpreviousRandNum = randNum\n\tend\n\treturn letterArr.join(\"\")\nend", "def blanket_patterns(colors, lines)\n lines.times do |i|\n first = colors[0]\n rest = colors[1..-1]\n colors = rest + first\n puts colors\n end\nend", "def event_bg_color\n @i ||= -1\n COLORS[(@i += 1) % COLORS.size]\n end", "def computer_code_acquisition\n @code_length.times.map { @colors.sample }\n end", "def spray_paint(new_color)\n self.color = new_color\n end", "def start_color color\n \"\\e[#{COLORS[color]}m\"\n end", "def start!\n @color = @@colors[:green]\n end", "def setup_ghost_colors(num, color)\n # Add 10 to each num to create a new number and assign the opacity-altered color values\n opacity = 0.5\n num += 10\n\n Curses.init_color(num, *color.map { |c| ((c * opacity) / 255.0 * 1000).to_i })\n Curses.init_pair(num, num, num)\nend", "def increment_pixel(rgb_color)\n #and get the value of the last pixel.\n aggregate_pixel = @pixel_count\n #Set the rgb color of a pixel and send it to the writer\n self.write_pixel(rand(@pixel_count),rgb_color)\n #and increment the count tracking the number of pixels we've updated\n @count_of_color_changes += 1\n #Take the number of color changes we've processed, and see if it's a multiple of 33\n if (@count_of_color_changes % 33) == 0\n #if it is, set the white value to be the result of the division\n white_value = @count_of_color_changes / 33\n #create a string formatted to the rgb color string\n white_rgb_color = Array.new(3,white_value).join ','\n #and set the aggregation pixel (our last pixel) to the new white level\n self.write_pixel(aggregate_pixel,white_rgb_color)\n end\n end", "def code_generator(colors, length)\n answer = Array.new(length).map do\n colors.sample\n end.join\n end", "def bg_red; use_code(41) end", "def colors; end", "def setcolorrep(*)\n super\n end", "def rainbow; end", "def color; end", "def color; end", "def color; end", "def assign_game_color count\n case count\n when 0\n return \"transparent\"\n when 6\n return $app_red\n else\n return $app_blue\n end\n end", "def colors() = @colors ||= %i[blue green purple magenta cyan yellow red]", "def singleColor()\n\n\tcolor = $rng.rand(0..5)\n\tword = $rng.rand(6..11)\n\n\tprintJumble(color, word)\n\n\tentry = gets.chomp\n\n\tif (entry == $arr[color+6])\n\t\t\treturn 1\n\telse\n\t\t\treturn 0\n\tend\n\nend", "def set_colour(r,g,b)\n\t\t@colour_r = LoopedArray.new [*r].map {|i| i % 256}\n\t\t@colour_g = LoopedArray.new [*g].map {|i| i % 256}\n\t\t@colour_b = LoopedArray.new [*b].map {|i| i % 256}\n\tend", "def add_colors\n add_colors = [\"red\",\"light_red\",\"light_yellow\"]\n puts add_colors.push(\"green\", \"blue\")\n # Write a solution that adds \"green\", then \"blue\" to the rainbow_colors array, then returns the array\nend", "def set_custom_color idx, red, green, blue\n raise 'Invalid format' if [red, green, 
blue].find { |c| ! (0..255).include?(c) }\n\n @palette[idx] = [red, green, blue]\n end", "def paint( color )\n self.update(color: color)\n end", "def r; self.color.r end", "def random_pattern\n randomish_string = ('a'..'z').to_a.shuffle.join\n randomish_color = \"%06x\" % (rand * 0xffffff)\n return GeoPattern.generate(randomish_string, color: randomish_color )\n end", "def time_for_new_paint(new_color)\n @color = new_color\n ahh_fresh_paint\nend", "def spray_paint(new_color)\n self.color = new_color\n puts \"Your car is sprayed with a new #{new_color} paint.\"\n end", "def initialize\n\t\t@board = Board.new\n\t\t@turn = rand(2) # randomizes which color starts first even BLACK odd RED\n\tend", "def onMe(str)\n case $prng1.rand(1..6)\n when 1\n return $pastel.on_red(str)\n when 2\n return $pastel.on_green(str)\n when 3\n return $pastel.on_yellow(str)\n when 4\n return $pastel.on_blue(str)\n when 5\n return $pastel.on_magenta(str)\n when 6\n return $pastel.on_cyan(str)\n end\nend", "def isMe(str)\n case $prng1.rand(1..6)\n when 1\n return $pastel.red(str)\n when 2\n return $pastel.green(str)\n when 3\n return $pastel.yellow(str)\n when 4\n return $pastel.blue(str)\n when 5\n return $pastel.magenta(str)\n when 6\n return $pastel.cyan(str)\n end\nend", "def random= rnd\n @sampling.random = rnd\n end", "def mix_with(color, amount)\n color = color.uicolor\n\n # make amount between 0 and 1\n amount = [[0, amount].max, 1].min\n # start with precise amounts: 0, 0.5, and 1.\n if amount == 0 && self.alpha == color.alpha\n self\n elsif amount == 1 && self.alpha == color.alpha\n color\n elsif amount == 0.5 && self.alpha == color.alpha\n r = (self.red + color.red) / 2\n g = (self.green + color.green) / 2\n b = (self.blue + color.blue) / 2\n a = self.alpha\n UIColor.colorWithRed(r, green:g, blue:b, alpha:a)\n else\n a = (color.alpha - self.alpha) * amount + self.alpha\n return UIColor.clearColor if a == 0\n\n color_red = color.red * color.alpha + self.red * (1 - color.alpha)\n self_red = self.red * self.alpha + color.red * (1 - self.alpha)\n color_green = color.green * color.alpha + self.green * (1 - color.alpha)\n self_green = self.green * self.alpha + color.green * (1 - self.alpha)\n color_blue = color.blue * color.alpha + self.blue * (1 - color.alpha)\n self_blue = self.blue * self.alpha + color.blue * (1 - self.alpha)\n\n r = (color_red - self_red) * amount + self_red\n g = (color_green - self_green) * amount + self_green\n b = (color_blue - self_blue) * amount + self_blue\n UIColor.colorWithRed(r, green:g, blue:b, alpha:a)\n end\n end", "def success(*args)\n color(32, *args)\n end", "def get_green_to_red_scala n\n colors = []\n r = 0; g = 150; b = 0\n max = 255\n\n #se empieza en un g oscuro en 150 y se aclarece añadiendo g hasta 255\n #ni = numero iteraciones\n ni = (1*n/3)\n for i in 1..(1*n/3.to_f).floor\n g = 150 + (i*(max - 150)/ni.to_f).floor\n colors << rgb(r, g, b)\n end\n\n #una vez g esta en 255 se añade r desde 150 hasta 255 hasta llegar a amarillo\n #ni = numero iteraciones\n g = 255\n ni = 1 + (2*n/3.to_f).floor - (1*n/3.to_f).ceil\n for j in (1*n/3.to_f).ceil..(2*n/3.to_f).floor\n i = j - (1*n/3.to_f).ceil + 1\n r = 150 + (i*(max - 150)/ni.to_f).floor\n colors << rgb(r, g, b)\n end\n\n #una vez g y r estan en 255 se quita g hasta 0 hasta llegar a rojo\n #ni = numero iteraciones\n g = r = 255\n ni = 1 + n - (2*n/3.to_f).ceil\n for i in (2*n/3.to_f).ceil..n\n g = ((n - i)*(max/ni.to_f)).floor\n colors << rgb(r, g, b)\n end\n\n #se entrega la escala de verde a rojo\n colors\n end" ]
[ "0.797875", "0.7633591", "0.7582373", "0.73997366", "0.72047055", "0.7145231", "0.70770097", "0.70254457", "0.6827469", "0.67569935", "0.67560965", "0.6747252", "0.67109215", "0.66429275", "0.6569292", "0.65565085", "0.6437411", "0.6426247", "0.63545126", "0.6226896", "0.62185353", "0.6209704", "0.6202722", "0.6189695", "0.6187629", "0.61440563", "0.6134422", "0.6123779", "0.61148614", "0.60894847", "0.60853225", "0.6060734", "0.6053078", "0.6026613", "0.6011707", "0.5995225", "0.5984495", "0.59258205", "0.59025216", "0.5896226", "0.5863227", "0.58453536", "0.5826949", "0.5812246", "0.5806082", "0.5806082", "0.5791142", "0.57854044", "0.57518005", "0.5748117", "0.5735447", "0.57347524", "0.5733452", "0.57257575", "0.5725608", "0.5723291", "0.57120997", "0.5702177", "0.56698686", "0.56675893", "0.56643504", "0.5647899", "0.5637652", "0.5623468", "0.5623468", "0.56209797", "0.5618172", "0.561524", "0.5614989", "0.5610687", "0.55900985", "0.5584196", "0.55796576", "0.5563379", "0.5559946", "0.5539528", "0.55367553", "0.5535191", "0.5525728", "0.55195045", "0.55195045", "0.55195045", "0.55118185", "0.55047184", "0.547604", "0.54681045", "0.5463209", "0.5462769", "0.5443387", "0.5424784", "0.5423899", "0.54236174", "0.5422156", "0.53982824", "0.5368948", "0.5364171", "0.5364047", "0.5359879", "0.5358784", "0.5346007" ]
0.90315664
0
Methods below to print the colors formatted
def yellow puts " yellow".yellow puts print "red" puts " blue" puts puts " green" puts end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_colors\n 1.upto(6) { |i| print \"#{i} = \" + \"\\u2b24\".color(COLORS[i]) + \" \" }\n print \": \"\nend", "def print_colors\n 1.upto(COLORS.size) { |i| print \"#{i} = \" + \" \".color(COLORS[i]) + \" \" }\n print \": \"\nend", "def print_colors\n 1.upto(@board.number_of_pegs) { |i| print \"#{i} = \" + \" \".color(COLORS[i]) + \" \" }\n print \": \"\n end", "def colortable\n names = %w(black red green yellow blue pink cyan white default)\n fgcodes = (30..39).to_a - [38]\n\n s = ''\n reg = \"\\e[%d;%dm%s\\e[0m\"\n bold = \"\\e[1;%d;%dm%s\\e[0m\"\n puts ' color table with these background codes:'\n puts ' 40 41 42 43 44 45 46 47 49'\n names.zip(fgcodes).each {|name,fg|\n s = \"#{fg}\"\n puts \"%7s \"%name + \"#{reg} #{bold} \"*9 % [fg,40,s,fg,40,s, fg,41,s,fg,41,s, fg,42,s,fg,42,s, fg,43,s,fg,43,s, \n fg,44,s,fg,44,s, fg,45,s,fg,45,s, fg,46,s,fg,46,s, fg,47,s,fg,47,s, fg,49,s,fg,49,s ]\n }\nend", "def list_colors\n color_string = \"\\nThe available colors are \"\n COLORS.each_with_index do |color, index|\n if index%2 == 0\n color_string += \"\\n\"\n end\n color_string += color + \" \"\n end\n puts color_string\n end", "def colortable\n names = %w(black red green yellow blue pink cyan white default)\n fgcodes = (30..39).to_a - [38]\n\n s = ''\n reg = \"\\e[%d;%dm%s\\e[0m\"\n bold = \"\\e[1;%d;%dm%s\\e[0m\"\n puts ' color table with these background codes:'\n puts ' 40 41 42 43 44 45 46 47 49'\n names.zip(fgcodes).each {|name,fg|\n s = \"#{fg}\"\n puts \"%7s \"%name + \"#{reg} #{bold} \"*9 % [fg,40,s,fg,40,s, fg,41,s,fg,41,s, fg,42,s,fg,42,s, fg,43,s,fg,43,s,\n fg,44,s,fg,44,s, fg,45,s,fg,45,s, fg,46,s,fg,46,s, fg,47,s,fg,47,s, fg,49,s,fg,49,s ]\n }\n end", "def gprint (text)\n\tprint colorize(colorize(text, \"black\"), \"white_bg\")\nend", "def output_color(text, color=text.to_i)\r\n # Color matches: 1 - Black; 2 - White; 3 - Red; 4 - Yellow; 5 - Green; 6 - Blue; 7 - Gold\r\n colors = { 1 => 30, 2 => 36, 3 => 31, 4 => 33, 5 => 35, 6 => 34, 7 => 220 }\r\n # \\e[47m Is for the grey foreground \\e[{color} is for picking the color and \\e[0m is for resetting the terminal.\r\n \"\\e[1m\\e[47m\\e[#{colors[color]}m#{text}\\e[0m\\e[22m\"\r\n end", "def colorize!(color_code) \"#{COLORS[color_code]}#{self.to_s}\\e[0m\" ; end", "def color_table\n [0, 1, 4, 5, 7].each do |attr|\n puts '----------------------------------------------------------------'\n puts \"ESC[#{attr};Foreground;Background\"\n 30.upto(37) do |fg|\n 40.upto(47) do |bg|\n print \"\\033[#{attr};#{fg};#{bg}m #{fg};#{bg} \"\n end\n puts \"\\033[0m\"\n end\n end\n end", "def red\n colorize(31)\n end", "def colorized?; end", "def print(color)\n puts \"Fetched at #{Time.now}\".colorize(:light_black)\n puts status.colorize(color)\n puts agency.colorize(color)\n puts location.colorize(color)\n end", "def puts_red(str)\n puts \" \\e[00;31m#{str}\\e[00m\"\nend", "def formatting\n return \"\\e[0;32m%c/%C |%b>%i| %e\\e[0m\"\n end", "def display_rainbow(colors)\n puts \"R: #{colors[4]}, O: #{colors[1]}, Y: #{[5]}, G: #{colors[3]}, B: #{colors[0]}, I: #{colors[2]}, V: #{colors[6]}\"\n puts colors\nend", "def cyan; if @options[:colors]; \"\\e[1;36m\" else \"\" end end", "def inspect\n @colors.map(&:inspect).join(' -> ').tap do |base|\n break \"#{base} (#{@label})\" if @label\n end\n end", "def color(color=32)\n printf \"\\033[#{color}m\"\n yield\n printf \"\\033[0m\"\nend", "def rainbow\n (0..256).each{ |color| \n print Paint[' ',48,5,color] # print empty bg color field\n }\n puts\n end", "def display_colors\n\t\tprint \"\\nColors: 
\"\n\t\tMastermind::COLORS.each do |color, _color_code|\n\t\t\tunless color == :blank || color == :black || color == :white\n\t\t\t\tcolor_string = color.to_s.capitalize\n\t\t\t\tprint Mastermind::color(\" #{color_string} \", color)\n\t\t\tend\n\t\tend\n\t\tputs \"\\nChoose a color with it's full name or it's first character\"\n\tend", "def display_rainbow(colors)\n puts \"R: #{colors[0]}, O: #{colors[1]}, Y: #{colors[2]}, G: #{colors[3]}, B: #{colors[4]}, I: #{colors[5]}, V: #{colors[6]}\"\nend", "def puts_color( msg, color=nil )\n color_set( color ); puts msg; color_end\n end", "def colorize(text, color_code); \"\\e[#{color_code}m#{text}\\e[0m\"; end", "def to_color_log\n @lines.map.with_index do |val, i|\n case val[0][:status] \n when \" \"\n \"\\033[90m#{i} #{val[0][:status]} #{val[0][:string]}\\033[0m\"\n when \"+\"\n \"\\033[32m#{i} #{val[0][:status]} #{val[0][:string]}\\033[0m\"\n when \"-\"\n \"\\033[31m#{i} #{val[0][:status]} #{val[0][:string]}\\033[0m\"\n when \"*\"\n \"\\033[36m#{i} #{val[0][:status]} #{val[0][:string]}\\033[0m\"\n end\n end.join(\"\\n\") + \"\\n\"\n end", "def green\n colorize(32)\n end", "def red\n colorize(:red)\nend", "def colors; end", "def puts_blue(string)\n puts \"\\033[34m\" + string + \"\\033[0m\"\nend", "def display_color_index\n require_color_echo_get\n\n CE.rainbow\n cnt = 134\n @padding = \" \" * 2\n\n header = \"OK, Let me check color index list... :)\"\n mes = CE.rainbow.get(@padding + \"-\" * cnt) + $/\n mes += @padding + \" \" * ((cnt - header.size)/2) + CE.rainbow.get(header) + $/\n mes += CE.rainbow.get(@padding + \"-\" * cnt) + $/\n\n mes += @padding\n 256.times do |i|\n num = i + 1\n mes += CE.fg(\"index#{num}\".intern).get(\"index#{num}\" + \" \" * (4 - num.to_s.size))\n mes += CE.bg(\"index#{num}\".intern).get(\" \" * 5)\n mes += \" \" * 3\n\n if num % 8 == 0\n mes += $/ * 2\n mes += @padding if num != 256\n end\n end\n print mes \n\n exit 0\nend", "def init_colors\n $desc_color = \"#{GREEN}\" # color of description portion\n # color the title based on priority\n $p5color = \"#{BLUE}#{BOLD}\" \n $p4color = \"#{MAGENTA}\" \n $p3color = \"#{CYAN}#{BOLD}\" \n $p2color = \"#{BOLD}\"\n $p1color = \"#{YELLOW}#{ON_RED}\"\n #\n # color for only the type column\n $bugcolor = \"#{BLACK}#{ON_RED}\"\n $enhcolor = \"#{GREEN}\"\n $feacolor = \"#{CYAN}\"\n\n # color for row of started event\n $startedcolor = \"#{STANDOUT}\"\n\n cols = %x[tput colors] rescue 8\n cols = cols.to_i\n if cols >= 256\n $desc_color = \"\\x1b[38;5;236m\" # 256 colors, grey\n $p5color = \"\\x1b[38;5;57m\" # some kinda blue\n $p4color = \"\\x1b[38;5;239m\" # grey. 
256 colors\n $p3color = \"\\x1b[38;5;244m\" # grey, 256 colors\n end\n end", "def format_color(name, text)\n if Pry.color\n \"\\001#{Pry::Helpers::Text.send name, '{text}'}\\002\".sub '{text}', \"\\002#{text}\\001\"\n else\n text\n end\nend", "def to_color_string\n ::PrintMembers::ColorString.new self\n end", "def console_display\n print @grid.to_text { |s| Grid.color_to_char(s.color) }\n end", "def cprint (msg = \"\", color = \"\")\n\n # Identify method entry\n debug_print \"#{ self } : #{ __method__ }\\n\"\n\n # This little check will allow us to take a Constant defined color\n # As well as a [0-256] value if specified\n if (color.is_a?(String))\n debug_print \"Custom color specified for cprint\\n\"\n STDOUT.write(color)\n elsif (color.between?(0, 256))\n debug_print \"No or Default color specified for cprint\\n\"\n STDOUT.write(\"\\e[38;5;#{ color }m\")\n end\n\n STDOUT.write(msg)\n end", "def display_rainbow(color_lists)\n puts \"R: #{color_lists[0]}, O: #{color_lists[1]}, Y: #{color_lists[2]}, G: #{color_lists[3]}, B: #{color_lists[4]}, I: #{color_lists[5]}, V: #{color_lists[6]}\"\nend", "def to_s\n [name, COLORS[ color ]].join(\"\\n\")\n end", "def print_color_block(color)\n color = COLORS[(color + 3) % NUM_COLORS]\n \"\\033[\" + color + 'm' + \"\\xe2\\x96\\x88\"\nend", "def cprint (msg = \"\", color = \"\")\n\n # Identify method entry\n debug_print \"#{ self } : #{ __method__ }\\n\"\n\n # This little check will allow us to take a Constant defined color\n # As well as a [0-256] value if specified\n if (color.is_a?(String))\n debug_print \"Custom color specified for cprint\\n\"\n @output.write(color)\n elsif (color.between?(0, 256))\n debug_print \"No or Default color specified for cprint\\n\"\n @output.write(\"\\e[38;5;#{ color }m\")\n end\n\n @output.write(msg)\n end", "def prn\n puts \" #{(0..8).to_a.join(\" \")}\"\n puts \" #{'-' * (2 * 9)}\"\n g.each_with_index do |v, i|\n # ERROR: print function doesn't display values and doesn't use colors\n # puts \"#{i} #{v.join(\" \")}\"\n puts \"#{i} | #{v.map{|t| t.n.to_s.colorize(t.c) }.join(' ')}\"\n end\n end", "def p( colsizes, align = nil, padding = 3 )\n\t\t\t\tprint \"#{@color}\"\n\t\t\t\tidx = 0\n\t\t\t\[email protected] do |item|\n\t\t\t\t\tif align and align[idx] == \"r\"\n\t\t\t\t\t\tprint \" \" * ( colsizes[ idx ] - item.to_s.length_utf8 )\n\t\t\t\t\tend\n\t\t\t\t\tprint item\n\t\t\t\t\tif align==nil or (align and align[idx] == \"l\")\n\t\t\t\t\t\tprint \" \" * ( colsizes[ idx ] - item.to_s.length_utf8 )\n\t\t\t\t\tend\n\t\t\t\t\tprint \" \"*padding if idx < colsizes.length - 1\n\t\t\t\t\tidx += 1\n\t\t\t\tend\n\t\t\t\tputs \"\\033[0m\"\n\t\t\tend", "def cputs(*args)\n puts args.join(\"\\n\").colorize\nend", "def colorize(text, color)\n\t\"\\e[#{Colors[color]}m#{text}\\e[0m\"\nend", "def yellow\n colorize(33)\n end", "def display_rainbow(colors)\n if (colors.size > 0) \n puts \"R: #{colors[0]}, O: #{colors[1]}, Y: #{colors[2]}, G: #{colors[3]}, B: #{colors[4]}, I: #{colors[5]}, V: #{colors[6]}\"\n end\nend", "def blue = \"\\e[36m#{self}\\e[0m\"", "def html_out(msg, color_name=\"black\")\n\t\t\trgb = Color::RGB::by_name color_name\n\t\t\tputs \"<span style='color:#{rgb.css_rgb};'>#{msg}</span>\"\n\t\tend", "def purple\n colorize(35)\n end", "def c(text, colors)\n text = \"%{B#{colors[:bg]}}#{text}%{B-}\" if colors[:bg]\n text = \"%{F#{colors[:fg]}}#{text}%{F-}\" if colors[:fg]\n text\nend", "def colors(warm, cool)\n puts \"#{warm} is a contrast color to #{cool}\"\nend", "def colorNormal\n puts \"\\033[0m\"\n end", "def 
red(input)\n puts \"\\e[31m#{input}\\e[0m\"\nend", "def red(input)\n puts \"\\e[31m#{input}\\e[0m\"\nend", "def output_message message, color\n case color\n when :black\n puts message.black\n when :red\n puts message.red\n when :green\n puts message.green\n when :yellow\n puts message.yellow\n when :blue\n puts message.blue\n when :magenta\n puts message.magenta\n when :cyan\n puts message.cyan\n else\n puts message\n end\n end", "def colorize(text, color_code); \"#{color_code}#{text}\\033[0m\"; end", "def colorize(s, c = :green)\n %{\\e[#{c == :green ? 33 : 31}m#{s}\\e[0m}\n end", "def colored(s)\n\tif $stdout.tty?\n\t\t\"\\e[0;36;49m#{s}\\e[0m\"\n\telse\n\t\t\"[#{s}]\"\n\tend\nend", "def color\n\t\t \t\t\t\"El color de tu vaca es #{@color}\"\n\t\t \t\tend", "def chco\n (foreground || \"FFFFFF\") + \",\" + super\n end", "def colorize(color_code)\n \"\\e[#{color_code};40m#{self}\\e[0m\"\n end", "def to_s(indent=0)\r\n prefix = indent > 0 ? ' ' * indent : ''\r\n \"#{prefix}Colour (#{@red}/#{@green}/#{@blue})\"\r\n end", "def green(string)\n \"\\033[0;32m#{string}\\033[0m\"\nend", "def green(string)\n \"\\033[0;32m#{string}\\033[0m\"\nend", "def green(string)\n \"\\033[0;32m#{string}\\e[0m\"\nend", "def green(string)\n \"\\033[0;32m#{string}\\e[0m\"\nend", "def print_status(msg, color)\n cprint RESET + BOLD\n cprint WHITE + \"[ \"\n cprint \"#{ msg } \", color\n cprint WHITE + \"] \" + RESET\n end", "def print_status(msg, color)\n cprint RESET + BOLD\n cprint WHITE + \"[ \"\n cprint \"#{ msg } \", color\n cprint WHITE + \"] \" + RESET\n end", "def colorize *args\n $terminal.color(*args)\nend", "def parse_colors(s)\n\n line = \"\"\n\n s.each_char do |c|\n line.concat(@colors[c]) if @colors.has_key?(c)\n line.concat(\" \")\n end\n\n line.concat(\"\\033[0m\")\n end", "def statuses(status)\n puts \"\\n#{status}\\n\".colorize(:green)\nend", "def red(msg)\n \"\\033[31m#{msg}\\033[39m\"\nend", "def red(string)\n \"\\033[0;33m#{string}\\033[0m\"\nend", "def red(string)\n \"\\033[0;33m#{string}\\033[0m\"\nend", "def bash_color_codes(string)\n string.gsub(\"\\e[0m\", '</span>').\n gsub(\"\\e[31m\", '<span class=\"color31\">').\n gsub(\"\\e[32m\", '<span class=\"color32\">').\n gsub(\"\\e[33m\", '<span class=\"color33\">').\n gsub(\"\\e[34m\", '<span class=\"color34\">').\n gsub(\"\\e[35m\", '<span class=\"color35\">').\n gsub(\"\\e[36m\", '<span class=\"color36\">').\n gsub(\"\\e[37m\", '<span class=\"color37\">')\n end", "def red(output)\n color(31, output)\n end", "def colorize(text = '_', color = 'default', bgcolor = 'default')\n colors = {\n 'default' => 38,\n 'black' => 30,\n 'red' => 31,\n 'green' => 32,\n 'brown' => 33,\n 'blue' => 34,\n 'purple' => 35,\n 'cyan' => 36,\n 'gray' => 37,\n 'dark gray' => '1;30',\n 'light red' => '1;31',\n 'light green' => '1;32',\n 'yellow' => '1;33',\n 'light blue' => '1;34',\n 'light purple' => '1;35',\n 'light cyan' => '1;36',\n 'white' => '1;37'\n }\n bgcolors = {\n 'default' => 0,\n 'black' => 40,\n 'red' => 41,\n 'green' => 42,\n 'brown' => 43,\n 'blue' => 44,\n 'purple' => 45,\n 'cyan' => 46,\n 'gray' => 47,\n 'dark gray' => 100,\n 'light red' => 101,\n 'light green' => 102,\n 'yellow' => 103,\n 'light blue' => 104,\n 'light purple' => 105,\n 'light cyan' => 106,\n 'white' => 107\n }\n\n color_code = colors[color]\n bgcolor_code = bgcolors[bgcolor]\n\n return \"\\033[#{bgcolor_code};#{color_code}m#{text}\\033[0m\"\nend", "def color_puts(id, text)\n if id < 10\n print \"\\033[0;3#{id+1}m#{text}\\033[0m\\n\"\n else\n print \"#{text}\\n\"\n end\nend", "def 
red(str)\n \"\\e[31m#{str}\\e[0m\"\nend", "def red(str)\n \"\\e[31m#{str}\\e[0m\"\nend", "def red(str)\n \"\\e[31m#{str}\\e[0m\"\nend", "def red(str)\n \"\\e[31m#{str}\\e[0m\"\nend", "def red(str)\n \"\\e[31m#{str}\\e[0m\"\nend", "def colorize(color, text)\n \"\\e[#{color}m#{text}\\e[0m\"\n end", "def red(text)\n colorize(text, 31)\nend", "def colorize(text, color_code)\n \"\\e[#{color_code}m#{text}\\e[0m\"\nend", "def colorize txt, fg, bg, flags\n fgc = (fg.nil? || Color === fg ) ? fg : Color.parse(fg)\n bgc = (bg.nil? || Color === bg) ? bg : Color.parse(bg)\n esc = []\n esc << '01' if flags[:b]\n esc << '03' if flags[:i]\n esc << '04' if flags[:u]\n esc << '07' if flags[:r]\n esc << \"38;05;#{fgc.xterm256}\" if fgc\n esc << \"48;05;#{bgc.xterm256}\" if bgc\n \n esc.empty? ? txt : \"\\e[#{esc.join(';')}m#{txt}\\e[0m\" \n end", "def colorize(color_code)\n \"\\e[#{color_code}m#{self}\\e[0m\"\n end", "def colorize(color_code)\n \"\\e[#{color_code}m#{self}\\e[0m\"\n end", "def bold_red(output)\n color('1;31', output)\n end", "def to_s\n if self.color == :white\n \" ♜ \"\n else\n \" ♖ \"\n end\n end", "def color_matrix( txt = \"[X]\" )\n size = String.colors.length\n String.colors.each do | color |\n String.colors.each do | back |\n print txt.colorize( :color => color, :background => back )\n end\n puts \" < #{color}\"\n end\n String.colors.reverse.each_with_index do | back, index |\n puts \"#{\"|\".rjust(txt.length)*(size-index)} < #{back}\"\n end\n \"\"\n end", "def info(*args); say $terminal.color(format(*args), :yellow); end", "def colors\n return\n end", "def draw\n $code.each { |color| print \"[ #{Rainbow(\"o\").background(color)} ] \" }\n puts \"\\n\\n\"\n end", "def to_c\n\t\t\tif color == \"white\"\n\t\t\t\t\"\\u26aa\"\n\t\t\telsif color == \"red\"\n\t\t\t\t\"\\u26d4\"\n\t\t\telsif color == \"black\"\n\t\t\t\t\"\\u26ab\"\n\t\t\tend\n\t\tend", "def red(string)\n \"\\033[0;31m#{string}\\e[0m\"\nend", "def red(string)\n \"\\033[0;31m#{string}\\e[0m\"\nend", "def colorize(* colors)\n buff = []\n colors.each{|color| buff << color_code(color)}\n buff << self << color_code(:off)\n buff.join\n end", "def display_rainbow(color_array)\n puts \"R: #{color_array[0]}, O: #{color_array[1]}, Y: #{color_array[2]}, G: #{color_array[3]}, B: #{color_array[4]}, I: #{color_array[5]}, V: #{color_array[6]}\"\nend", "def colorize(text, color_code)\n \"#{color_code}#{text}\\e[0m\"\nend", "def green(text)\n colorize(text, 32)\nend" ]
[ "0.8401645", "0.83544445", "0.794392", "0.7561849", "0.7499658", "0.74204737", "0.7370787", "0.7359186", "0.7327295", "0.72326493", "0.72243935", "0.7222981", "0.71984416", "0.7150895", "0.7147547", "0.7136925", "0.712202", "0.7117745", "0.7099947", "0.709909", "0.7026124", "0.70227504", "0.6996038", "0.6995102", "0.69393975", "0.6931694", "0.6926891", "0.6918888", "0.6918262", "0.68980473", "0.6894996", "0.68822485", "0.68788207", "0.68746436", "0.6828601", "0.6820927", "0.6819125", "0.681732", "0.6813976", "0.68097204", "0.68035465", "0.6799726", "0.6753151", "0.67523324", "0.6748224", "0.6734754", "0.672532", "0.67028064", "0.67021525", "0.66991276", "0.669833", "0.6685901", "0.6685901", "0.6683752", "0.6679339", "0.66733015", "0.6666101", "0.6665571", "0.6660767", "0.6659171", "0.6658366", "0.6653062", "0.6653062", "0.6635102", "0.6635102", "0.6616392", "0.6616392", "0.66131437", "0.6610259", "0.65976936", "0.65886825", "0.6588648", "0.6588648", "0.65867865", "0.658", "0.65754336", "0.65752757", "0.6572233", "0.6572233", "0.6572233", "0.6572233", "0.6572233", "0.65430784", "0.65409505", "0.6540904", "0.65354073", "0.6522423", "0.6522423", "0.65153056", "0.65115327", "0.6509288", "0.650922", "0.65050673", "0.6502617", "0.6501625", "0.6489923", "0.6489923", "0.64814705", "0.64771056", "0.6472781", "0.6469644" ]
0.0
-1
global starting point function
def program_entry_point flag=1; input_choice=0; student_list = StudentList.new while flag>0 puts 'Student Management Program' puts '----------------------------------' puts '1. Enter New Student Data' puts '2. Display Student Data via Search' puts '3. Display Entire List' puts '4. Delete First Student Record' puts '5. Delete Last Student Record' puts '6. Exit' puts '----------------------------------' puts 'Enter Choice Number :' input_choice = gets.chomp.to_i; puts input_choice case input_choice when 1 s = getData student_list.append(s) when 2 puts 'Enter student roll number' key = gets.chomp.to_i student_list.[](key) when 3 student_list.display_list when 4 student_list.deleteFirst when 5 student_list.deleteLast when 6 puts 'Exiting.........' flag=0 end end end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def starting; end", "def start;end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start; end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n end", "def start\n \n\tend", "def start_run; end", "def started; end", "def global; end", "def start\n noth\n end", "def start\n 0\n end", "def start\n\t\tinit\n\t end", "def startup\nend", "def beginning\n abstract!\n end", "def startup_hook; end", "def start\n end", "def start\n end", "def startup\n end", "def startup\n end", "def startup\n end", "def start\n end", "def start\n\t\tend", "def startup\n end", "def start=(_arg0); end", "def start\n\n end", "def start!\n\t\t\n\t\[email protected]_location #prints initial location (i.e., (0,0))\n\t\twhile [email protected]_home? do\n\t\t\[email protected]!\n\t\t\[email protected]_location #print location after each hop (don't need a method for final_location)\n\t\t\t@hops += 1\n\t\tend\n\t\t\n\tend", "def main; end", "def started?; end", "def start\n copy_all_functions \"Copying functions over\"\n end", "def start_after=(_arg0); end", "def start(info); end", "def main_end ; end", "def start_after; end", "def preloop\n end", "def initial; end", "def command_start; end", "def command_start; end", "def command_start; end", "def main\n\n end", "def end_point=(_arg0); end", "def running; end", "def running; end", "def pre_loop; end", "def main\n end", "def starting_position=(_arg0); end", "def starting_position; end", "def caller(start=1) end", "def initialize_starting_block\n nil\n end", "def run_init_script; end", "def always_run=(_arg0); end", "def start()\n\n\t\tend", "def start(mode)\n end", "def running=(_arg0); end", "def start; @opts['start']; end", "def stopped_at=(_arg0); end", "def stopped_at=(_arg0); end", "def stopped_at=(_arg0); end", "def landing() end", "def onStart\r\n end", "def start\n true\n end", "def autostart=(_arg0); end", "def setup(point)\n\t\t\n\tend", "def start\n @config[:start]\n end", "def start \n @Done = false \n end", "def start\n put :start\n end", "def main\n\nend", "def prepare_result\n\t\t\tsuper\n\t\t\t@lowest_old = MM.get_lowest_old(@current_point, @start_vector, @hd_config, false, @tuning_range)\n if @lowest_old[0] == nil\n\t\t\t\t@initial_run = true\n\t\t\t\tthrow :jump_back\n\t\t\tend\n\t\tend", "def run; end", "def run; end", "def run; end", "def run; end", "def run; end", "def run; end", "def run; end", "def run; end", "def run; end", "def on_start\n end" ]
[ "0.80667645", "0.7106009", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.7059703", "0.698288", "0.698288", "0.698288", "0.698288", "0.698288", "0.698288", "0.698288", "0.698288", "0.69743174", "0.69743174", "0.69743174", "0.69743174", "0.6813936", "0.68093216", "0.67874146", "0.67640823", "0.6762184", "0.67525315", "0.6692768", "0.66668665", "0.6629313", "0.66250503", "0.6613308", "0.6613308", "0.65964544", "0.65964544", "0.65964544", "0.6554807", "0.6550872", "0.65298873", "0.64883995", "0.6488284", "0.64882034", "0.64740634", "0.64552915", "0.64436585", "0.6433401", "0.6421191", "0.64102197", "0.64089215", "0.63968176", "0.6365215", "0.63585", "0.63585", "0.63585", "0.6353768", "0.6343623", "0.6323252", "0.6323252", "0.63149405", "0.6299388", "0.62989926", "0.6292102", "0.6291115", "0.6271666", "0.62694335", "0.6265644", "0.62474996", "0.6186035", "0.61860037", "0.6184603", "0.6166005", "0.6166005", "0.6166005", "0.6163638", "0.6159143", "0.61538756", "0.61391574", "0.61374253", "0.61190104", "0.610213", "0.6092587", "0.60905683", "0.60751504", "0.6070763", "0.6070763", "0.6070763", "0.6070763", "0.6070763", "0.6070763", "0.6070763", "0.6070763", "0.6070763", "0.60473514" ]
0.0
-1
test "the truth" do assert true end
def setup @frank = users(:frank) @mike = users(:mike) end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_the_truth\n assert true\nend", "def test_the_truth\n assert true\n end", "def test_the_truth\n assert true\n end", "def test_this_works\n assert true\n end", "def test_truth\r\n assert true\r\n end", "def test_truth\r\n assert true\r\n end", "def test_truth\r\n assert true\r\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert 
true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end", "def test_truth\n assert true\n end" ]
[ "0.9192157", "0.9121996", "0.9121996", "0.9044381", "0.90220094", "0.90220094", "0.90220094", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693", "0.8968693" ]
0.0
-1
method dumps data to file
def dump File.open(@meta_data_file_location,'w') { |f| f.write(YAML.dump(@meta_data))} end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write\n open(@fname,\"wb\") do |file|\n Marshal.dump(@data,file)\n end\n end", "def dump_file_data(io)\n end", "def save\n pathname.open('w') { |file| file.write(data) }\n end", "def dump_data(to)\n data.keys.each do |key|\n FileUtils.mkdir_p(to + File.dirname(key))\n File.open(to + key, \"wb\") do |out|\n case data[key]\n when StringIO\n out.write data[key].read # .mbackup file data\n when Hash\n out.write Plist::Emit.dump(data[key]) # Info.plist, etc.\n else\n puts \"couldn't write out #{key}, don't know how to handle a #{data[key].class}\"\n end\n end\n end\n end", "def write_data(filename, data)\n file = File.open(filename, \"w\")\n file.puts(data)\n file.close\nend", "def write_data(filename, data)\n file = File.open(filename, \"w\")\n file.puts(data)\n file.close\nend", "def write_data(filename, data)\n file = File.open(filename, \"w\")\n file.puts(data)\n file.close\nend", "def save(filename, data)\n\tf = File.open(filename, 'w')\n\tf.puts(data)\n\tf.close\nend", "def save_data(file)\n File.open(file, 'w').write(JSON.pretty_generate(@data))\n end", "def write(data)\n begin\n File.open(@filename, \"wb\") { |file| file.puts data.to_csv }\n rescue \n puts \"Error: \" + $!.to_s\n end \n end", "def save!; File.write @path, @data end", "def write data, path\n\t\t\tcontent = \"\"\n\t\t\tfile_type = data.class.to_s\n\n\t\t\tif file_type == 'Array'\n\t\t\t\tdata.each do | line |\n\t\t\t\t\tline.each do | key, val |\n\t\t\t\t\t\tcontent << \"#{key.to_s}=#{val}\\n\"\n\t\t\t\t\tend\n\t\t\t\t\tcontent << \"\\n\"\n\t\t\t\tend\n\n\t\t\telsif file_type == 'Hash'\n\t\t\t\tdata.each do | key, val |\n\t\t\t\t\tcontent << \"#{key.to_s}=#{val}\\n\"\n\t\t\t\tend\n\t\t\tend\n\n\t\t\tpath = File.expand_path path\n\t\t\tFile.open(path, 'w+') do |f|\n\t\t\t\tf.write content\n\t\t\tend\n\t\tend", "def write_data(filename, data)\n\tFile.open(filename, 'w').puts(data)\nend", "def write\n file = ::File.open(@file, 'w')\n file.write(Bencode.encode(@data))\n file.close\n end", "def write_save_data(file)\r\n write_characters(file)\r\n write_frame(file)\r\n write_setup(file)\r\n write_data(file)\r\n end", "def write(data); end", "def write(data); end", "def write(data); end", "def write(data); end", "def write(data)\n begin\n File.open(@filename, \"w\") { |file| file.puts data.to_html }\n rescue \n puts \"Error: \" + $!.to_s\n end \n end", "def save_output(data)\n ::File.open(datastore['OUTFILE'], 'wb') do |f|\n f.write(data)\n print_status(\"Saved results in #{datastore['OUTFILE']}\")\n end\n end", "def save_output(data)\n ::File.open(datastore['OUTFILE'], 'wb') do |f|\n f.write(data)\n print_status(\"Saved results in #{datastore['OUTFILE']}\")\n end\n end", "def write(data)\n File.open(@filename, mode(\"w\")) do |f|\n f.flock File::LOCK_EX\n f << export(data)\n end\n end", "def save_data(data, filename)\n File.binwrite(filename, Marshal.dump(data))\n return nil\nend", "def save_to_file\n f = File.open(\"#{IMAGE_DATA_DIR}/#{self.id}\",\"w\")\n f.write(self.data)\n f.close\n end", "def write_to_file(path)\n File.open(path, \"w\") do |f|\n f.print serialize\n end\n end", "def save(data)\n File.open(@local_file_path, \"wb\") {|file| file.write(data) }\n end", "def save_data(data,filename=nil)\n filename=data_file(filename)\n case File.extname(filename).downcase\n when \".yaml\",\".yml\"\n open(filename,\"w\"){|f| f.puts YAML.dump(data)}\n when \".json\"\n open(filename,\"w\"){|f| f.puts JSON.generate(data)}\n else\n open(filename,\"w\"){|f| f.puts data }\n end\n end", "def dumpFile(fname)\n\t\treturn 
writeFile(fname, self.dump())\n\tend", "def file(data, path)\n File.open(path, 'w') { |f| f << data }\n end", "def write_to_file(data)\n\t\t\tref = File.join(@root, \"tarrifs_\" + @page[:name])\n\n\t\t\tif File.exists?(ref)\n\t\t\t\tdiff = \"\"\n\t\t\t\tstatus = Open4::popen4(\"diff #{ref} -\") do |pid, stdin, stdout, stderr|\n\t\t\t\t\tstdin.puts data\n\t\t\t\t\tstdin.close\n\t\t\t\t\tdiff = stdout.read\n\t\t\t\tend\n\t\t\t\t#sent mail if content is different\n\t\t\t\tif status != 0\n\t\t\t\t\twrite \"change detected.\"\n\t\t\t\t\tnotify_changed_site(url, diff)\n\t\t\t\tend\n\t\t\tend\n\t\t\tFile.open(ref, \"w\") do |f|\n\t\t\t\tf.puts data\n\t\t\tend\n\t\tend", "def save(dir, data, filename)\n FileUtils.mkdir_p dir\n fn = dir+\"/\"+filename \n puts fn\n open(fn, 'w') { |f|\n f.puts data\n f.close\n }\n end", "def save_to_file\n File.open(@output, 'w+') do |file|\n file.puts HEADER if @additional_html\n file.puts @data_for_output.join(\"\\n\")\n file.puts FOOTER if @additional_html\n end\n end", "def dump_file_data(io)\n while buffer = file_data.read(4096) do io.write(buffer) end\n\n # Attempt to ensure that the file data will still be in a readable state\n # at the beginning of the data for the next user, but close it if possible\n # in order to conserve resources.\n if file_path.nil? then\n # When the file_path attribute is not set, the file_data method cannot\n # reinitialize the IO object it returns, so attempt to rewind the file\n # data back to the beginning, but ignore errors.\n begin\n file_data.rewind\n rescue\n # Ignore.\n end\n else\n # Since the file_path attribute is set, the file_data method will\n # reinitialize the IO object it returns if we close the object here.\n file_data.close\n end\n end", "def to_file(filename)\n\tdata2write\t= JSON.pretty_generate(@json_data);\n\tFile.open(filename, 'w+'){|f| f.write(data2write)}\nend", "def write\n write_data\n end", "def write(data)\n end", "def store\n File.open(@file_name, 'w') do |file|\n file.write YAML::dump(@data)\n end\n end", "def save_data\n puts \"saving data\"\n\n File.open(generate_filename(self), \"w\") do |f|\n f.write(ostruct_to_hash(self.json).to_yaml)\n end\n end", "def to_file\n \"#{@name};#{@id}\"\n end", "def to_file( f )\n buf = [@name, @ref.binary_role(), @ref.binary_type(), @ref.name,\n @type, TYPE_SIZES[@type], \n @data.length()].pack(PFORMAT)\n f.write(buf)\n\n fmt_str = \"%s%d\" % [TYPES[@type], data.length]\n buf = data.pack(fmt_str)\n f.write(buf)\n end", "def dump_file_data(io)\n io.write(@link_target)\n end", "def save_dataE(obj, filename) \r\n File.open(filename, \"wb\") { |f|\r\n Marshal.dump(obj, f)\r\n }\r\nend", "def write2(file = 'default', data, mode, size)\n dump_object(file)\n dump_object(data)\n dump_object(mode)\n dump_object(size)\n puts \"===========================\"\nend", "def save()\n File.write(@database_file, @data.to_json)\n end", "def writeData\n # Make the directory to store the RoadWaer data.\n File.makedirs(File.dirname($datafilename))\n $debugdata = $debugdata + \"makedir\\n\"\n\n # Open the file to append the registration data.\n file = File.open($datafilename, \"a\")\n $debugdata = $debugdata + \"open\\n\"\n # Write user data.\n file.puts($events.read.print)\n $debugdata = $debugdata + \"puts\\n\"\n # Make sure the output file is always closed.\n file.close\n $debugdata = $debugdata + \"close\\n\"\n\n $debugdata = $debugdata + value.local_path + \"\\n\"\n $debugdata = $debugdata + value.original_filename + \"\\n\"\n $debugdata = $debugdata + value.content_type + \"\\n\"\n 
true\n\n rescue\n false\nend", "def save_data(data)\n raw = JSON.dump(data)\n File.open(FILENAME, 'w') do |file|\n file.write(raw)\n end\nend", "def write(data)\n # black hole, because we don't actually care about what gets written\n end", "def write path\n File.open(path, 'w') do |io|\n io.print serialize\n end\n end", "def dump(file_path = nil)\n file_path ||= File.join(@config.data_path, \"#{Time.now.to_i.to_s}.bin\")\n File.open(file_path, 'wb') do |io|\n io << MessagePack.pack(to_hash)\n end\n file_path\n end", "def write_list_to_file(filename, data)\n file = File.open(filename, \"w\")\n data.each do |d|\n file.write(d.to_s + \"\\n\")\n end\n file.close unless file.nil?\nend", "def save_to_file(obj)\n File.open(@filename, @mode) do |aFile|\n aFile.puts \"#{obj}\"\n end\n end", "def data_file(counter)\n # create a temp file\n t = File.open(File.join(base,\"data\", \"#{counter}.data\"), 'w')\n t << self.data\n t.close\n t.path\n end", "def writeFile(fname, data)\n\tinf = File.new(fname, \"w+\")\n\tret = inf.write(data)\n\tinf.close\n\treturn ret\nend", "def save_to(filepath)\n File.open(filepath, \"w\") do |fp|\n if @args[:data].is_a?(StringIO)\n fp.write(@args[:data].string)\n else\n #Stream output to avoid using too much memory.\n self.io do |fp_read|\n fp_read.lines do |line|\n fp.write(line)\n end\n end\n end\n end\n end", "def write(passed_data = nil)\n\t\t\tthis_data = passed_data || data\n\t\t\tFile.open(file_path+'/'+file_name, 'wb') do |file|\n\t\t\t\tfile.write( this_data.to_json )\n\t\t\tend\n\t\tend", "def to_file( f )\n size = 0\n @indexes.each { size += Index::FORMAT_SIZE }\n @idata.each do |i| \n size += IntData::FORMAT_SIZE\n size += i.data_size\n end\n\n buf = [@source, @name, @sym, @indexes.length(), @idata.length(), \n size].pack(PFORMAT)\n f.write(buf)\n @indexes.each { |i| i.to_file(f) }\n @idata.each { |i| i.to_file(f) }\n end", "def write()\n f = File.open(\"#{@directory}/#{@filename}\", \"w\")\n f.write(@raw)\n f.close()\n end", "def write_content(file_out)\n file_out.puts(@array)\n end", "def write_file(dir, file, data)\n File.open(\"#{dir}/#{file}.data\", \"w\") do |file|\n file.write(data.to_a.join(\"\\n\"))\n end\nend", "def to_file\n replace_with_tempfile unless @tempfile_in\n flush\n self\n end", "def write_file(filename, data)\n f = File.open(filename, 'w')\n f.write(data)\n f.close\nend", "def dump\r\n @db_mon.synchronize {\r\n File.open(@path,'wt') { |file|\r\n @seek_db.each_value { |x|\r\n file.write(x.to_json)\r\n file.write(\"\\n\")\r\n }\r\n } rescue nil\r\n }\r\n end", "def writeData(f)\n xgrid = getOption(\"xgrid\")\n sigma = getOption(\"sigma\")\n\n @data.length.times { |i|\n\tx = if xgrid then xgrid[i].to_s else i.to_s end\n\tsig = if sigma then sigma[i].to_s else \" \" end\n\t\n\tf.writeln( x + \" \" + @data[i].to_s + \" \" + sig)\n }\n end", "def save_to_file\n\t\tFile.open(SOURCE_PATH + DATABASE_NAME, 'a') do |file| \n\t\t\tfile.puts(\"#{@street_address}|\" + \\\n\t\t\t\t\t\t\"#{@house_type}|\" + \\\n\t\t\t\t\t\t\"#{@no_of_bedrooms}|\" + \\\n\t\t\t\t\t\t\"#{@no_of_bathrooms}|\" + \\\n\t\t\t\t\t\t\"#{@no_of_toilets}|\" + \\\n\t\t\t\t\t\t\"#{@land_size}|\" + \\\n\t\t\t\t\t\t\"#{@location}\")\n\t\tend\n\tend", "def exportfile arr\n begin\n file = File.open(\"result.txt\", \"w\")\n text = showResulf arr\n file.puts text\n file.close\n binding.pry\n rescue IOError => e\n puts \"Can not write file. 
Please try again after there.\"\n ensure\n file.close unless file.nil?\n end\nend", "def toFile(filename)\t\n file = File.open(filename, 'w')\n db.each_key do |name|\n s = \"\"\n if (db[name].class == BasicFood)\n \ts+= name + \",b,\" + db[name].calories.to_s\n end\n if (db[name].class == Recipe)\n \ts = name + \",r\"\n\tdb[name].foods.each do |food|\n\t s += \",\" + food.name\n\tend\n end\n s += \"\\n\"\n file.write(s)\n end\n end", "def _dump() end", "def serialize(data)\n File.open(\"test/basic_complex_test/ruby_test/bin.babel.rb\", \"w+b\") do |f|\n f.write(data)\n end\n end", "def to_file(filename)\n\tdata2write\t= to_json();\n\tFile.open(filename, 'w+'){|f| f.write(data2write)}\nend", "def write_file(file, data)\n File.open(file, 'w') { |fd| fd.write(data) }\n end", "def to_file( f )\n buf = [ MAGIC, VERSION, @timestamp.to_i, @analyses.length() \n ].pack(PFORMAT)\n f.write(buf)\n\n @analyses.each do |a|\n a.to_file(f)\n end\n end", "def saveIndex(fileName = \"dataTest.dat\", data)\n\t\tfile = File.new(fileName, \"w+\")\n\t\tserializedData = Marshal.dump(data)\n\t\tfile.write(serializedData)\n\t\tfile.close\n\tend", "def write(filename)\n File.open(filename, \"wb\") { |f| f.write(@raw_data) }\n end", "def write data\n assert !@closed\n\n # We store the text as a list so appending will be cheap.\n @text << data\n unless @silent\n $stdout.print data \n $stdout.flush\n end\n if @report_file\n @report_file.print data\n @report_file.flush\n end\n end", "def data_write\n fd = File.new('fraze.dat',\"w\")\n $words.each_index do\n |iw|\n printf(fd,\"%s|%d|%d|%s|%s\\n\",\n $words[iw].fname,\n $words[iw].enlevel,\n $words[iw].czlevel,\n $words[iw].english,\n $words[iw].czech)\n end\n fd.close\n puts \"\\nDatabase stored\"\nend", "def writeData(filename = \"out.csv\")\n\t\tfile = File.new(filename, \"w\")\n\t\t\n\t\[email protected] do |singleEntry|\n\t\t\tfile.puts \"#{singleEntry[0]},#{singleEntry[1]},#{singleEntry[2]}\"\n\t\tend\n\t\n\t\tfile.close\n\t\t\n\tend", "def dump( obj, &block )\n f = File.open( get_unique_filename, 'w' )\n\n serialized = serialize( obj )\n f.write( serialized )\n\n block.call( serialized ) if block_given?\n\n f.path\n ensure\n f.close\n end", "def dump(data_path=File.join(DATA_DIR, \"#{Time.now.to_i}.json\"))\n\n if(File.exists?(data_path))\n raise ArgumentError.new(\"Refusing to overwrite existing file #{data_path}\")\n else\n dir, base = File.split(data_path)\n cmd = \"mkdir -p #{dir}\"\n pid = status = stderr = nil\n #Open3.popen3([env,] cmd... 
[, opts]) {|stdin, stdout, stderr, wait_thr|\n Open3.popen3(cmd) {|stdin, stdout, stderr, wait_thr|\n pid = wait_thr.pid # pid of the started process.\n status = wait_thr.value # Process::Status object returned.\n }\n if(status.success?)\n else\n raise ArgumentError.new(\"Could not create directory #{dir}; stderr of #{cmd} : #{stderr}\")\n end\n end\n\n sql = \"select * from #{TABLE_KEY}\"\n query = [\n \"key=#{PROJECT_KEY}\",\n \"sql=#{CGI.escape(sql)}\"\n ]\n url = \"#{TABLE_URL}?#{query.join(\"&\")}\"\n response = get(url)\n \n File.open(data_path, \"w\"){|ff|\n size = ff.write(response.body)\n }\nend", "def saveListToFile()\n\t\t# f is going to equal data.txt with the 'write' capability:\n\t\tf = File.new('data.txt', 'w')\n\n\t\t# write searchSuggestionList to data.txt:\n\t\tf.write(\"#{@searchSuggestionList}\")\n\n\t\t# close data.txt/ end writing:\n\t\tf.close\n\tend", "def save\n return if @filename.nil?\n FileUtils.mkdir_p File.dirname(@filename)\n Utils.atomic_write(@filename) { |f| f.write(JSON.generate(@data)) }\n end", "def file_write(file2wrt, data2wrt)\n if not ::File.exists?(file2wrt)\n ::FileUtils.touch(file2wrt)\n end\n\n output = ::File.open(file2wrt, 'a')\n data2wrt.each_line do |d|\n output.puts(d)\n end\n output.close\n end", "def write_data\n data = {}\n tmp_file = \"#{@wallet_file}.tmp\"\n\n @data.each do |item|\n next if item.empty?\n\n data.merge!(\n item.id => {\n 'id' => item.id,\n 'group' => item.group,\n 'user' => item.user,\n 'url' => item.url,\n 'comment' => item.comment,\n 'last_edit' => item.last_edit,\n 'created' => item.created\n }\n )\n end\n\n Gem::Package::TarWriter.new(File.open(tmp_file, 'w+')) do |tar|\n data_encrypt = encrypt(data.to_yaml)\n tar.add_file_simple('wallet/meta.gpg', 0400, data_encrypt.length) do |io|\n io.write(data_encrypt)\n end\n\n @passwords.each do |id, password|\n tar.add_file_simple(\"wallet/passwords/#{id}.gpg\", 0400, password.length) do |io|\n io.write(password)\n end\n end\n\n @otp_keys.each do |id, key|\n tar.add_file_simple(\"wallet/otp_keys/#{id}.gpg\", 0400, key.length) do |io|\n io.write(key)\n end\n end\n\n @keys.each do |id, key|\n tar.add_file_simple(\"wallet/keys/#{id}.pub\", 0400, key.length) do |io|\n io.write(key)\n end\n end\n end\n\n File.rename(tmp_file, @wallet_file)\n rescue => e\n File.unlink(tmp_file) if File.exist?(tmp_file)\n\n raise \"#{I18n.t('error.mpw_file.write_data')}\\n#{e}\"\n end", "def write; end", "def write; end", "def save(id, file_data)\n\t\tpuzzle_path = \"./puzzles/puzzle-#{ id }.pdf\"\n\t\topen(puzzle_path, \"wb\") do |file|\n\t\t\tfile << file_data\n\t\tend\n\tend", "def write_down! 
\n File.open(@filename, \"wb\").write(raw)\n end", "def save_to_file(file_name = @current_file_name)\n # Loop on all dirline widgets\n File.open(file_name, 'wb') do |file|\n file.write(FILE_HEADER)\n file.write(Zlib::Deflate.deflate(RubySerial::dump({\n :dest_dir_names => @main_widget.get_dest_dirlines.map { |dirline_widget| dirline_widget.get_dir_name },\n :src_dir_names => @main_widget.get_src_dirlines.map { |dirline_widget| dirline_widget.get_dir_name },\n :data => @data\n })))\n end\n notify(\"File #{file_name} saved correctly\")\n @current_file_name = file_name\n invalidate_current_loaded_file\n end", "def save_to_disk(data, filename)\n return nil if data.nil?\n\n dest_file = File.join(tmp_dir, filename)\n FileUtils.rm_f(dest_file) # in case we wrote and aborted previously\n data.save_as(dest_file)\n SouvlakiRS.logger.info \" File saved: #{dest_file}\"\n dest_file\n end", "def to_file( f )\n buf = [@name, @sym, @value].pack(PFORMAT)\n f.write(buf)\n end", "def save\n File.open(file_path, 'w') do |file|\n YAML.dump(data, file)\n end\n end", "def save_data(obj, filename) \r\n File.open(File.join(Yrgss.game.getPath.getAbsolutePath,filename), \"wb\") { |f|\r\n Marshal.dump(obj, f)\r\n }\r\nend", "def save_cached_data\n # open file for writing\n @file_handle = File.open(file_path, 'w')\n \n # write data string into file\n @file_handle << (@file_data_to_write.respond_to?(:read) ? @file_data_to_write.read : @file_data_to_write)\n \n # close file\n close_file\n \n end", "def save_cached_data\n # open file for writing\n @file_handle = File.open(file_path, 'w')\n \n # write data string into file\n @file_handle << (@file_data_to_write.respond_to?(:read) ? @file_data_to_write.read : @file_data_to_write)\n \n # close file\n close_file\n \n end", "def create_save\n @save_data = {:turns => @turns,:guesses => @guesses,:secret_word => @secret_word, :hidden_word => @hidden_word}\n save = File.new(\"./lib/save.txt\", \"w+\")\n save.puts JSON::dump(save_data)\n save.close\n end", "def save_to_file(data, destination)\n path = File.join(file_dir, destination)\n File.write(path, data)\nend", "def save_to_file()\n File.open(@filename,\"w\") do |f|\n movies_hash = []\n @items.each do |movie|\n movies_hash.push(movie.to_hash)\n end\n f.write(JSON.pretty_generate(movies_hash))\n end\n end", "def dump out_file, title=@title\n format.dump @stats, out_file, title\n end", "def write_file_contents(ph, fname, data)\r\n\r\n\t\tdoc = rand_text_alphanumeric(16+rand(16))\r\n\r\n\t\t# StartDocPrinter\r\n\t\tstatus,jobid = start_doc_printer(ph, doc, fname)\r\n\t\tif status != 0 or jobid < 0\r\n\t\t\traise RuntimeError, \"Unable to start print job: #{Msf::WindowsError.description(status)}\"\r\n\t\tend\r\n\t\tprint_status(\"Job started: 0x%x\" % jobid)\r\n\r\n\t\t# WritePrinter\r\n\t\tstatus,wrote = write_printer(ph, data)\r\n\t\tif status != 0 or wrote != data.length\r\n\t\t\traise RuntimeError, ('Failed to write %d bytes!' % data.length)\r\n\t\tend\r\n\t\tprint_status(\"Wrote %d bytes to %%SystemRoot%%\\\\system32\\\\%s\" % [data.length, fname])\r\n\r\n\t\t# EndDocPrinter\r\n\t\tstatus = end_doc_printer(ph)\r\n\t\tif status != 0\r\n\t\t\traise RuntimeError, \"Failed to end print job: #{Msf::WindowsError.description(status)}\"\r\n\t\tend\r\n\tend", "def dump_to_file (file_name, workitem)\n\n File.open(file_name, 'w') do |file|\n file.print(encode_workitem(workitem))\n end\n end" ]
[ "0.76638824", "0.7298004", "0.7202988", "0.7185437", "0.7137326", "0.7137326", "0.7137326", "0.713604", "0.7111296", "0.7104809", "0.708573", "0.70680314", "0.7013293", "0.7011336", "0.69803196", "0.69469416", "0.69469416", "0.69469416", "0.69469416", "0.6917348", "0.6912466", "0.6912466", "0.6886283", "0.68819267", "0.6806463", "0.67802125", "0.67512906", "0.6736837", "0.67173797", "0.67136544", "0.66889364", "0.66784537", "0.6650487", "0.66446346", "0.6636287", "0.6627455", "0.66258514", "0.66157144", "0.65785396", "0.6553584", "0.652778", "0.6527446", "0.6521293", "0.65097225", "0.648731", "0.64763004", "0.6463297", "0.64599574", "0.64475673", "0.6446844", "0.6440174", "0.64195883", "0.64143497", "0.6406025", "0.64027804", "0.63912636", "0.63879806", "0.63603574", "0.6357999", "0.6354305", "0.6345456", "0.6341476", "0.63382643", "0.6332258", "0.6327979", "0.6313602", "0.6309501", "0.63003725", "0.6295186", "0.62892866", "0.62889034", "0.6288024", "0.62773556", "0.6263124", "0.6258072", "0.62531763", "0.62435603", "0.6243544", "0.62430465", "0.62417006", "0.62252367", "0.6218541", "0.62169474", "0.62067354", "0.62067354", "0.62057287", "0.6191884", "0.6181637", "0.6174884", "0.6173018", "0.6171026", "0.6166857", "0.61658806", "0.61658806", "0.613466", "0.6133203", "0.61318934", "0.6124551", "0.61244625", "0.6122551" ]
0.6216566
83
Initialize a new System instance.
def initialize(options={})
  extend self
  extend ShellUtils

  @root   = options[:root] || Dir.pwd
  @ignore = options[:ignore] || []  #Ignore.new

  @rulebook   = options[:rulebook]
  @state_file = options[:statefile]

  @session  = OpenStruct.new
  @scripts  = []
  @rules    = []
  #@facts   = []
  @digests  = {}
  @rulesets = {}

  import(*rulebook)
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(keyring)\n @keyring = keyring\n @system = System.new\n end", "def initialize(name)\n # Initialize the system type structure.\n super(name)\n # Update the library of existing system types.\n # Note: no check is made so an exisiting system type with a same\n # name is overwritten.\n SystemTs[@name] = self\n end", "def setup_system(system_hash)\n @system = Souffle::System.from_hash(system_hash)\n @provider = initialize_provider(\n cleanup_provider(@system.try_opt[:provider]))\n end", "def initialize(system = {})\n @state = system\n @state[:time] ||= 0\n super()\n end", "def initialize(sys)\n @sys = VCS.select(sys)\n end", "def initialize\n\t\t\t@os_name = get_os_name\n\t\tend", "def initialize(system_name, pure = false)\n @system_name = system_name\n @current_stage = []\n @service_stages = []\n @stage_monitor = Monitor.new\n @service_monitor = Monitor.new\n @tx_monitor = Monitor.new\n @event_queue = Queue.new\n @service_registry = LocalServiceRegistry.new\n @supervisor = SupervisorCompanion.new self\n\n # Setting pure to true will keep core from registering a new framework by default\n unless pure\n # Stage 0: Framework level\n register_service :framework, Framework, self\n commit_stage\n\n # Stage 1: Core logger\n register_service :corelog, LoggerService, DEFAULT_CORE_LOGFILE\n commit_stage\n\n # Stage 2: Configuration service\n register_service :config, ConfigService, \"config/config.yml\"\n commit_stage\n\n # Stage 3: Console host level\n register_service :console_host, ConsoleHostService, STDIN, STDOUT, STDERR\n commit_stage\n\n @framework = @supervisor[:framework]\n end\n end", "def initialize\n define_os\n define_path\n read_settings\n end", "def initialize\n @program_start_pointer = 0\n init_name\n init_clock\n init_register\n init_ports\n end", "def test_system_new\n api = Mu::System.new(@mu_ip, @mu_admin_user, @mu_admin_pass)\n assert(api.host == @mu_ip, \"failed to set mu_ip\")\n assert(api.docroot == \"/api/v5/system/\", \"failed to set docroot\")\n end", "def initialize(machine)\n end", "def init_system(arg = nil)\n set_or_return(:init_system,\n arg.nil? ? 
arg : arg.to_sym,\n kind_of: Symbol,\n equal_to: [:upstart],\n default: :upstart)\n end", "def initialize\n # Create the DLL handlers\n @@lowDLL = DLL.new('System/fmod.dll')\n @@studioDLL = DLL.new('System/fmodstudio.dll')\n \n # Import the functions from each DLL\n loadLowDLL()\n loadStudioDLL()\n \n handle = Studio::System.create()\n @system = Studio::System.new(handle)\n @masterBank = @system.loadBankFile(\"Master Bank.bank\")\n @stringsBank = @system.loadBankFile(\"Master Bank.strings.bank\")\n @sfxBank = @system.loadBankFile(\"SFX.bank\")\n end", "def initialize\n init\n end", "def initialize(*args)\n super\n mon_initialize\n end", "def initialize\n @console = Console.new\n end", "def with_system(sys_config)\n @config[AF::SYSTEM] = sys_config\n self\n end", "def initialize(system)\n @system = system\n #@name = (options[:name] || MASTER_NAME).to_s\n #@ignore = options[:ignore]\n\n @filename = system.state_file\n\n @current = Hash.new{ |h,k| h[k.to_s] = {} }\n @saved = Hash.new{ |h,k| h[k.to_s] = {} }\n\n read\n refresh\n end", "def initialize(*arguments) \n super()\n if(Model::TARGETHOST == Model::CLIENTHOST)\n @runner = System.new(File.join(ZIMBRAPATH,'bin','tomcat'), ZIMBRAUSER, *arguments)\n else \n @runner = StafSystem.new(Model::TARGETHOST, File.join(ZIMBRAPATH,'bin','tomcat'), ZIMBRAUSER, *arguments)\n end \n end", "def init\n end", "def init\n end", "def init\n end", "def initialize(system = nil, authentication = nil)\n if system.nil? or authentication.nil?\n require 'yaml'\n data = YAML.load_file File.expand_path(ENV['TICKETMASTER_CONFIG'] || '~/.ticketmaster.yml')\n system = system.nil? ? data['default'] || data.first.first : system.to_s\n authentication = data[system]['authentication'] if authentication.nil? and data[system]['authentication']\n end\n self.extend TicketMaster::Provider.const_get(system.to_s.capitalize)\n authorize authentication\n @symbol = system.to_sym\n @provider = TicketMaster::Provider.const_get(system.to_s.capitalize)\n end", "def system\n JenkinsApi::Client::System.new(self)\n end", "def system\n JenkinsApi::Client::System.new(self)\n end", "def initialize()\n\t\tsuper\n\t\tself.servicekinds = []\n\t\tself.status = ''\n\t\tsetup_known_services\n\t\tload_rc\n\t\tself\n\tend", "def initialize(*)\n super\n machine\n end", "def init\n return unless @context.nil?\n\n $stdout.puts('Loading JARs...')\n\n load_jar_files\n\n $stdout.puts(\"Loading #{@dspace_cfg}...\")\n org.dspace.core.ConfigurationManager.load_config(@dspace_cfg)\n\n kernel_impl = org.dspace.servicemanager.DSpaceKernelInit.get_kernel(nil)\n unless kernel_impl.is_running\n $stdout.puts('Starting new DSpaceKernel...')\n kernel_impl.start(@dspace_dir)\n end\n @kernel = kernel_impl\n\n @context = org.dspace.core.Context.new\n end", "def define_system(*types, &update)\n Cedar.define_system(*types, &update)\n end", "def initialize\n initialize!\n end", "def initialize\n initialize!\n end", "def initialize\n @viewport = Viewport.new(0,0,Graphics.width,Graphics.height)\n @viewport.z = 99999\n # Initialize shell options if not set\n $ShellOptions ||= ShellOptions.load\n # Get the active config. 
If none is found, use the default config.\n if !$ShellOptions.activeConfig || !$ShellOptions.shellConfigs.has_key?($ShellOptions.activeConfig)\n $ShellOptions.shellConfigs['default'] ||= ShellConfiguration.newDefault\n $ShellOptions.activeConfig = 'default'\n end\n @config = $ShellOptions.shellConfigs[$ShellOptions.activeConfig]\n # Create the console window and set the available commands.\n @window = ConsoleWindow.new(self,@viewport.rect)\n @prompt = @config.prompt\n @aliases = $ShellOptions.shellAliases\n @commands = {}\n self.set_commands\n @context = nil\n self.main\n end", "def initialize\n super\n configure_logging\n detect_platform()\n detect_resolver()\n load_config()\n end", "def initialize(ptr, retain = true)\n super(ptr)\n #STDERR.puts \"Allocating Platform: #{ptr}\"\n end", "def initialize(machine, name, options = T.unsafe(nil)); end", "def set_system\n @system = System.find(params[:id])\n end", "def set_system\n @system = System.find(params[:id])\n end", "def initialize(bus = Systemd::Helpers.system_bus)\n @service = bus.service(Systemd::SERVICE)\n @object = @service.object(NODE)\n @object.default_iface = INTERFACE\n @object.introspect\n end", "def create_system(system, tag_prefix=nil)\n end", "def initialize(virtual_machine)\n @handle = virtual_machine\n end", "def initialize(name)\n @name = name\n @make = \"/opt/csw/bin/gmake\"\n @tar = \"/usr/sfw/bin/gtar\"\n @patch = \"/usr/bin/gpatch\"\n @sed = \"/opt/csw/bin/gsed\"\n @shasum = \"/opt/csw/bin/shasum\"\n # solaris 10\n @num_cores = \"/usr/bin/kstat cpu_info | awk '{print $$1}' | grep '^core_id$$' | wc -l\"\n super(name)\n if @architecture == \"sparc\"\n @platform_triple = \"sparc-sun-solaris2.#{@os_version}\"\n elsif @architecture == \"i386\"\n @platform_triple = \"i386-pc-solaris2.#{@os_version}\"\n end\n end", "def initialize()\n end", "def initialize(machine, options = T.unsafe(nil)); end", "def initialize(...)\n super\n mon_initialize\n end", "def initialize(shell)\n\t\tsuper\n\tend", "def initialize\n @game = Game.new\n @machine = Machine.new(game)\n end", "def initialize_environment\n end", "def initialize_environment\n end", "def initialize!\n load_passenger\n initialize_ush_api\n initialize_debugging\n undo_bundler\n end", "def initialize\n puts \"constructor is automatically called when we create object\"\n end", "def initialize(*options)\n # @system_data = {'some' => 'data}\n super(*options)\n end", "def init\n raise NotImplementedError\n end", "def init\n self['env'] ||= 'development'\n\n n = (self['port'] || 3000).to_i\n assert n >= 0 && n <= 65535\n self['port'] = n\n\n n = (self['workers'] || self['worker'] || ((CpuCounter.count + 1)/ 2)).to_i\n assert n > 0 && n < 1000\n self['workers'] = n\n\n unless self['root']\n set :root, Dir.pwd\n end\n self['root'] = File.realpath File.expand_path self['root']\n\n # todo warn paths not under project?\n self['views'] = project_path(self['views'] || 'views')\n if self['public']\n self['public'] = project_path(self['public'])\n end\n\n if self['assets']\n self['assets'] = project_path(self['assets'])\n end\n\n self.logger = create_logger\n\n assert !self['before_fork'] || self['before_fork'].respond_to?('call')\n assert !self['after_fork'] || self['after_fork'].respond_to?('call')\n\n self['timeout'] ||= 120\n timeout = self['timeout'].to_i\n assert timeout > 0 && timeout < 2**30\n self['timeout'] = timeout\n Ext.set_inactive_timeout timeout\n end", "def system=(value)\n @system = value\n end", "def initialize\n end", "def initialize\n end", "def initialize\n end", "def 
initialize\n end", "def initialize\n end", "def initialize\n end", "def initialize\n @environment_variables = {}\n @aliases = {}\n @commands = []\n end", "def initialize(name='<none>', version='', parent=nil)\r\n @name, @vm_version = name, version\r\n Thread.current[:vm] = self\r\n @dictionary = Dictionary.new(self, parent && parent.dictionary)\r\n @vm_version = parent.vm_version unless parent.nil?\r\n interpreter_reset\r\n compiler_reset\r\n end", "def initialize(ruby_opts='')\n @proxy = nil\n @socket = \"/tmp/rubysu-#{Process.pid}-#{object_id}\"\n @sudo_pid = nil\n @ruby_opts = ruby_opts\n @loaded_features = []\n # @load_path = [] # currentl unused\n end", "def initialize\n @registry = Registry.new\n end", "def initialize() end", "def initialize modname, lang\n require 'rubygems'\n require 'platform'\n\n @cpu = Platform::ARCH\n @os = Platform::OS\n\n if @os == :unix then # we need to be more specific\n @os = Platform::IMPL\n end\n\n @vars = Hash.new\n setup_toolset\n setup_paths modname\n setup_conf lang, modname\n end", "def initialize\n end", "def initialize\n # Load the setup settings and any user overrides.\n @settings = Configuration::CombinedConfiguration.new\n @settings.load_configuration('default', 'Configuration/setup.yaml')\n @settings.load_configuration('user', 'Configuration/user.yaml')\n\n @ruby_bin = RbConfig::CONFIG['bindir']\n @install_flag = \".bundle/#{RUBY_VERSION}_#{ruby_platform}.flag\"\n\n # The timestamp values - the installer is run depending on these values.\n @t1 = UserSettings.last_modify_date.to_i\n @t2 = File.mtime('Gemfile').to_i\n\n @gem_path = File.join(@settings['setup.gem_path'], ruby_platform)\n @mysql_dir = File.join(FRAMEWORK_ROOT, \"#{LIB_DIR}/mysql/#{ruby_platform}\")\n\n runtime.save(:ruby_bin, @ruby_bin)\n runtime.save(:gem_path, @gem_path)\n runtime.save(:mysql_dir, @mysql_dir)\n runtime.save(:setup_settings, @settings)\n end", "def initialize(paths = [])\n @at = Time.now\n @boot_time = Vmstat.boot_time\n @cpus = Vmstat.cpu\n @disks = paths.map { |path| Vmstat.disk(path) }\n @load_average = Vmstat.load_average\n @memory = Vmstat.memory\n @network_interfaces = Vmstat.network_interfaces\n @task = Vmstat.task if Vmstat.respond_to? :task\n end", "def init; end", "def init; end", "def init; end", "def init; end", "def init\n\n end", "def initialize\n super()\n @vagrant_dir = @system.try_opt(:vagrant_dir)\n create_new_vm_group unless current_folder_has_souffle_config?\n generate_vagrant_config\n end", "def initialize(config)\n setup(config)\n log 'New World Instance initialized!'\n end", "def io_initialize\n STDOUT.sync = true unless STDOUT.tty?\n return if PSDK_CONFIG.release?\n @cmd_thread = create_command_thread\n rescue StandardError\n puts 'Failed to initialize IO related things'\n end", "def initialize(system)\n @callbacks = []\n @cbs = [open_cb, close_cb, seek_cb, close_cb]\n FMOD.invoke(:System_AttachFileSystem, system, *@cbs)\n end", "def initialize(environment_name = \"default\", options = {})\n merge_config! 
options\n @environment_name = environment_name\n @shell_wrapper = Shell.default_wrapper.new\n @shell_wrapper.setup do |s|\n source_rvm_environment\n use_rvm_environment\n end\n end", "def begin_provisioning\n @provisioner = Souffle::Provisioner::System.new(@system, @provider)\n end", "def initialize()\n end", "def initialize()\n end", "def initialize()\n end", "def initialize()\n end", "def initialize()\n end", "def initialize()\n end", "def initialize()\n end", "def initialize()\n end", "def initialize\n System.stats.each { |s| instance_variable_set(\"@#{s}\", System.make_stat) }\n end", "def initialize(bus = Systemd::Helpers.system_bus)\n @service = bus.service(Importd::SERVICE)\n @object = @service.object(NODE)\n @object.default_iface = INTERFACE\n @object.introspect\n end", "def initialize()\n\t\tend", "def initialize(*args)\n super\n chef_server run_context.cheffish.current_chef_server\n end", "def initialize # :notnew:\n evaluate PRELUDE, PRELUDE_PATH, 1\n global.Johnson.runtime = self\n global['Ruby'] = Object\n evaluate CORE, CORE_PATH, 1\n end", "def _initialize(name, machine)\n initialize_capabilities!(\n name.to_sym,\n { name.to_sym => [Class.new, nil] },\n Vagrant.plugin(\"2\").manager.provider_capabilities,\n machine,\n )\n end", "def initialize(path)\n\n #Open the device, and store its handle.\n @handle = DeviceManager::open_device(path) \n\n #If we didn't get a valid handle, raise the relevant exception.\n if @handle.nil?\n raise DeviceManager::last_error\n end\n\n end", "def initialize(machine, config)\n @machine = machine\n @config = config\n end", "def initialize(machine, config)\n @machine = machine\n @config = config\n end", "def initialize\n end", "def initialize\n end", "def initialize\n end", "def initialize\n end" ]
[ "0.724665", "0.7126794", "0.6942167", "0.6874744", "0.6853828", "0.6848445", "0.6805444", "0.67442214", "0.6483095", "0.6481711", "0.64611375", "0.6420318", "0.6398328", "0.63961893", "0.6332454", "0.63303274", "0.63118666", "0.6305797", "0.6298951", "0.6281667", "0.6281667", "0.6281667", "0.6276365", "0.6250975", "0.6250975", "0.6239968", "0.6238302", "0.6227832", "0.6186825", "0.61852205", "0.61852205", "0.61827666", "0.6173808", "0.6153462", "0.61464614", "0.61407363", "0.61407363", "0.611594", "0.6114726", "0.608643", "0.6076214", "0.6064078", "0.60533124", "0.6050849", "0.6034011", "0.60333365", "0.6030358", "0.6030358", "0.60056084", "0.6001565", "0.59921825", "0.59746885", "0.5958972", "0.59581107", "0.5956008", "0.5956008", "0.5956008", "0.5956008", "0.5956008", "0.5956008", "0.5952253", "0.5945915", "0.59448236", "0.59428596", "0.59395623", "0.59378386", "0.5917461", "0.5916875", "0.59010696", "0.58948064", "0.58948064", "0.58948064", "0.58948064", "0.58837014", "0.58812845", "0.5863846", "0.5860197", "0.58586013", "0.5857044", "0.5854205", "0.58510154", "0.58510154", "0.58510154", "0.58510154", "0.58510154", "0.58510154", "0.58510154", "0.58510154", "0.58491915", "0.5836925", "0.5828082", "0.5812888", "0.5811373", "0.5810733", "0.5806373", "0.5795937", "0.5795937", "0.5795151", "0.5795151", "0.5795151", "0.5795151" ]
0.0
-1
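The record above pairs a docstring with an options-hash constructor. Below is a self-contained Ruby sketch of that initialization pattern; MiniSystem and its option handling mirror the snippet but are a stand-in for the real System class, not the record's actual code, and the rulebook import step is deliberately omitted.

require 'ostruct'

# Minimal stand-in for the System class above; only the options-hash
# defaulting pattern is reproduced.
class MiniSystem
  attr_reader :root, :ignore, :session

  def initialize(options = {})
    @root    = options[:root]   || Dir.pwd   # fall back to the working directory
    @ignore  = options[:ignore] || []        # default to no ignore globs
    @session = OpenStruct.new                # free-form per-run state
  end
end

sys = MiniSystem.new(:ignore => ['tmp/**/*'])
puts sys.root    #=> current working directory
p    sys.ignore  #=> ["tmp/**/*"]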
def default(rulesets)
  ruleset :default => rulesets
end

Rulesets provide a separate space for rules which are only run when the ruleset name is specifically given.

Returns [Ruleset]
def ruleset(name_and_chain, &block)
  name, chain = parse_ruleset_name(name_and_chain)
  if @rulesets.key?(name)
    ruleset = @rulesets[name]
    ruleset.update(chain, &block)
  else
    ruleset = Ruleset.new(self, name_and_chain, &block)
    @rulesets[name] = ruleset
  end
  ruleset
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default\n defaults = {}\n @rules.each {|rule| rule.complete_defaults(defaults)}\n defaults\n end", "def default_rule; end", "def default_rules(key)\n @vertex.fetch(@rule_prefix + key, nil)\n end", "def default_collection_rule\n Proc.new do |collection_rule, objects, options|\n # Passed values\n (options[:values] || {}).each { |key, value| collection_rule.send(\"#{key}=\".to_sym, value)}\n \n # Default values\n collection_rule.id ||= options[:self]\n collection_rule.title ||= \"#{objects.first.class.to_s.pluralize.demodulize} feed\"\n collection_rule.updated ||= updated_collection(objects)\n\n # Transitions\n collection_rule.links << link(:rel => :self, :href => options[:self]) unless options[:self].nil?\n end\n end", "def default_collection_rule\n Proc.new do |collection_rule, objects, options|\n # Passed values\n (options[:values] || {}).each { |key, value| collection_rule.send(\"#{key}=\".to_sym, value)}\n \n # Default values\n collection_rule.id ||= options[:self]\n collection_rule.title ||= \"#{objects.first.class.to_s.pluralize.demodulize} feed\"\n collection_rule.updated ||= updated_collection(objects)\n\n # Transitions\n collection_rule.links << link(:rel => :self, :href => options[:self]) unless options[:self].nil?\n end\n end", "def ruleset(ruleset, ctx)\r\n rulelist = rulelist(ruleset, ctx)\r\n\r\n cmtSuffix = \"\"\r\n ruleParams = \"#{ruleset.execType}\" # Build the ruleset parameter list.\r\n\r\n if (ruleset.type == \"PL\")\r\n ruleParams += \", PL\"\r\n cmtSuffix += \"(PowerLookup)\"\r\n end # if ruleset.type\r\n\r\n aliasStmt = \"\" # Don't create an alias statement if it is not needed.\r\n\r\n if (ruleset.name != ruleset.alias)\r\n aliasStmt = <<EOF\r\nalias(ruleset, #{ruleset.name}, \"#{ruleset.alias}\");\r\nEOF\r\n end # if ruleset.name...\r\n\r\n\r\n out = <<EOF\r\n#{aliasStmt}\r\n/* ==========================================================================\r\n * #{ruleset.name} #{cmtSuffix}\r\n *\r\n *\r\n */\r\nruleset #{ruleset.name}(#{ruleParams})\r\n#{rulelist}\r\nend // ruleset #{ruleset.name}(#{ruleParams})\r\n\r\n\r\n\r\n\r\nEOF\r\n\r\n return out\r\n\r\n end", "def default!\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 42 )\n\n type = DEFAULT\n channel = ANTLR3::DEFAULT_CHANNEL\n\n \n # - - - - main rule block - - - -\n # at line 163:11: 'default'\n match( \"default\" )\n\n \n @state.type = type\n @state.channel = channel\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 42 )\n\n end", "def rule_set\n @rule_set ||= Rules::RuleSet.build_for(calendar: calendar, kind: kind)\n end", "def default_clause\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 29 )\n return_value = DefaultClauseReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n string_literal120 = nil\n char_literal121 = nil\n statement_list122 = nil\n\n tree_for_string_literal120 = nil\n tree_for_char_literal121 = nil\n\n begin\n root_0 = @adaptor.create_flat_list\n\n\n # at line 434:5: 'default' ':' ( statement_list )?\n string_literal120 = match( DEFAULT, TOKENS_FOLLOWING_DEFAULT_IN_default_clause_2900 )\n if @state.backtracking == 0\n\n tree_for_string_literal120 = @adaptor.create_with_payload( string_literal120 )\n root_0 = @adaptor.become_root( tree_for_string_literal120, root_0 )\n\n end\n char_literal121 = match( COLON, TOKENS_FOLLOWING_COLON_IN_default_clause_2904 )\n # at line 434:23: ( 
statement_list )?\n alt_25 = 2\n look_25_0 = @input.peek( 1 )\n\n if ( look_25_0 == GENERAL || look_25_0 == GET || look_25_0 == ARROW || look_25_0 == IF || look_25_0 == REGEX || look_25_0 == INCR || look_25_0 == BREAK || look_25_0 == RETURN || look_25_0 == IS_DEFINED || look_25_0 == LBRACE || look_25_0 == LBRACK || look_25_0.between?( SEMI, CONST ) || look_25_0.between?( SET, LET ) || look_25_0 == DDOC || look_25_0.between?( DECR, LPAREN ) || look_25_0 == DELETE || look_25_0.between?( DGENERAL, DO ) || look_25_0 == THROW || look_25_0 == TILDE || look_25_0 == TRUE || look_25_0 == TRY || look_25_0.between?( TYPEOF, NEW ) || look_25_0.between?( EACH, UNDEFINED ) || look_25_0.between?( NULL, UNLESS ) || look_25_0 == UNTIL || look_25_0 == FALSE || look_25_0 == VAR || look_25_0.between?( VOID, FOR ) || look_25_0 == WHILE || look_25_0.between?( WITH, YIELD ) || look_25_0.between?( IS_UNDEFINED, DOC ) || look_25_0.between?( T__148, T__150 ) )\n alt_25 = 1\n end\n case alt_25\n when 1\n # at line 434:23: statement_list\n @state.following.push( TOKENS_FOLLOWING_statement_list_IN_default_clause_2908 )\n statement_list122 = statement_list\n @state.following.pop\n if @state.backtracking == 0\n @adaptor.add_child( root_0, statement_list122.tree )\n end\n\n end\n # - - - - - - - rule clean up - - - - - - - -\n return_value.stop = @input.look( -1 )\n\n if @state.backtracking == 0\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n @adaptor.set_token_boundaries( return_value.tree, return_value.start, return_value.stop )\n\n end\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n return_value.tree = @adaptor.create_error_node( @input, return_value.start, @input.look(-1), re )\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 29 )\n\n end\n \n return return_value\n end", "def CreateDefault\n @Behaviour = :one\n tmp_hex_ip = @hostid\n @tomerge = Builtins.add(@tomerge, tmp_hex_ip)\n while Builtins.size(tmp_hex_ip) != 1\n tmp_hex_ip = Builtins.substring(\n tmp_hex_ip,\n 0,\n Ops.subtract(Builtins.size(tmp_hex_ip), 1)\n )\n @tomerge = Builtins.add(@tomerge, tmp_hex_ip)\n end\n @tomerge = Builtins.add(@tomerge, Builtins.toupper(@mac))\n @tomerge = Builtins.add(@tomerge, Builtins.tolower(@mac))\n @tomerge = Builtins.add(@tomerge, \"default\")\n Builtins.y2milestone(\"Created default rules=%1\", @tomerge)\n nil\n end", "def default_clause\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 18 )\n return_value = DefaultClauseReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n string_literal67 = nil\n statement_list68 = nil\n\n tree_for_string_literal67 = nil\n\n begin\n root_0 = @adaptor.create_flat_list\n\n\n # at line 103:5: ^( 'default' ( statement_list )? 
)\n _save_last_1 = _last = @input.look\n _first_1 = nil\n root_1 = @adaptor.create_flat_list\n _last = @input.look\n string_literal67 = match( DEFAULT, TOKENS_FOLLOWING_DEFAULT_IN_default_clause_529 )\n\n tree_for_string_literal67 = @adaptor.copy_node( string_literal67 )\n\n root_1 = @adaptor.become_root( tree_for_string_literal67, root_1 )\n\n\n\n if @input.peek == DOWN\n match( DOWN, nil )\n # at line 103:18: ( statement_list )?\n alt_19 = 2\n look_19_0 = @input.peek( 1 )\n\n if ( look_19_0.between?( AMP, AMP_ASGN ) || look_19_0 == POST_DECR || look_19_0.between?( GEQ, AREF ) || look_19_0.between?( GREATER, HAT ) || look_19_0.between?( ARROW, HAT_ASGN ) || look_19_0.between?( ASGN, REGEX ) || look_19_0.between?( IN, RETURN ) || look_19_0 == INCR || look_19_0.between?( BREAK, RSHIFT3 ) || look_19_0.between?( LABEL, CATCH ) || look_19_0 == RSHIFT_ASGN || look_19_0 == LEQ || look_19_0.between?( LESS, SLASH ) || look_19_0.between?( SLASH_ASGN, CONTINUE ) || look_19_0.between?( STAR, DECR ) || look_19_0 == STAR_ASGN || look_19_0.between?( LSHIFT, THIS ) || look_19_0 == THROW || look_19_0.between?( MINUS, MOD ) || look_19_0.between?( MOD_ASGN, TYPEOF ) || look_19_0.between?( NEQ, UMINUS ) || look_19_0.between?( NEQQ, UNDEFINED ) || look_19_0.between?( NEW, UPLUS ) || look_19_0.between?( OBJECT, FALSE ) || look_19_0.between?( WITH, PLUS ) || look_19_0.between?( ID, DOC ) )\n alt_19 = 1\n end\n case alt_19\n when 1\n # at line 103:18: statement_list\n _last = @input.look\n @state.following.push( TOKENS_FOLLOWING_statement_list_IN_default_clause_531 )\n statement_list68 = statement_list\n @state.following.pop\n\n @adaptor.add_child( root_1, statement_list68.tree )\n\n\n end\n\n match( UP, nil )\n end\n @adaptor.add_child( root_0, root_1 )\n _last = _save_last_1\n\n\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 18 )\n\n end\n \n return return_value\n end", "def default\n all['index_sets'].find { |i| i['default'] == true }\n end", "def chain=(*rulesets)\n @chain = rulesets.map{ |b| b.to_sym }\n end", "def is_default?\n @rule_details.is_default == true\n end", "def get_default\n list.each do |plan|\n return plan if plan['default']\n end\n nil\n end", "def get_default\n list.each do |plan|\n return plan if plan['default']\n end\n nil\n end", "def create_default_rule(question_content, display_field, name)\n return if display_field.nil?\n\n question_content.survey_version.rules.create! 
:name => name, :rule_order => (question_content.survey_version.rules.count + 1),\n :execution_trigger_ids => [ExecutionTrigger::ADD],\n :action_type => 'db',\n :criteria_attributes => [\n {:source_id => question_content.id, :source_type => \"QuestionContent\", :conditional_id => 10, :value => \"\"}\n ],\n :actions_attributes => [\n {:display_field_id => display_field.id, :value_type => \"Response\", :value => question_content.id.to_s}\n ]\n end", "def ruleset_rules\n @rulesets.collect do |r|\n [\"# Begin [#{r.name}]\",\n r.firewall_rules,\n \"# End [#{r.name}]\",\n \"\"]\n end\n end", "def set_default\n end", "def default_definition() {} end", "def rules_by_name; end", "def valid_sets\n sets = site.config[\"defaults\"]\n return [] unless sets.is_a?(Array)\n\n sets.filter_map do |set|\n if valid?(set)\n massage_scope!(set)\n # TODO: is this trip really necessary?\n ensure_time!(set)\n else\n Bridgetown.logger.warn \"Defaults:\", \"An invalid front-matter default set was found:\"\n Bridgetown.logger.warn set.to_s\n nil\n end\n end\n end", "def default(options = {})\n options[:gemset] ? @parent.show_alias(:default) : @parent.list_default\n end", "def rules\n #\n # This is called first in case any preable needs to be declared (chains, specifically)\n #\n _ruleset_rules = ruleset_rules\n\n [\n Asbestos.firewall.preamble(self),\n _ruleset_rules,\n Asbestos.firewall.postamble(self)\n ].flatten\n end", "def boolean_rule_test_sets\n {\n 'not set': 'fail',\n 'set': 'pass'\n }\nend", "def get_staging_rule_sets\n get(\"#{url_base}/staging/rule_sets?#{dc}\")[\"data\"]\n end", "def defaults!; end", "def defaults!; end", "def add_default(constraints)\n return unless constraints\n\n @defaults ||= []\n @defaults << constraints\n end", "def defaults(defs = {})\n\t\t\t@defaults ||= {}\n\t\t\[email protected]!(defs)\n\t\tend", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def defaults; end", "def default; end", "def default; end", "def [](ruleset)\n for_ruleset(ruleset)\n end", "def configured_default\n @options[:default] || default_entry\n end", "def default\n end", "def defaults_definition\n return @defaults if @defaults\n\n @defaults = ancestors_with(Default)\n .inject(Definition.empty) do |acc, elem|\n acc.merge(elem.module_defaults_definition)\n end\n end", "def default\n @default || atlas_default\n end", "def create_default_roles\n # Create the default Rolesets\n Jak.rolesets.each do |role_set|\n my_role = roles.find_or_create_by(name: role_set.role_name, key: role_set.role_name.parameterize, company: self)\n my_role.permission_ids = role_set.permission_ids\n my_role.save!\n end\n end", "def method_missing(rule_set_name, args = {})\n template = Asbestos::RuleSet[rule_set_name]\n raise %{Unknown host DSL call : \"#{rule_set_name}\" for host \"#{name}\"} unless template\n\n @rulesets << \\\n Asbestos::RuleSet.new(rule_set_name, self, template).tap do |rs|\n # override template defaults with provided options\n args.each do |k, v|\n rs.send k, v\n end\n end\n end", "def set_defaults\n end", "def set_defaults\n end", "def rules( *new_values )\n\t\tself.rules = new_values unless new_values.empty?\n\t\treturn @rules\n\tend", "def set_default\n cmd = \"{\\\"id\\\":8,\\\"method\\\":\\\"set_default\\\",\\\"params\\\":[]}\\r\\n\"\n request(cmd)\n end", "def defaults\n []\n end", "def default_groups\n @default_groups ||= begin\n if 
ENV['AUTHORIZED_NETWORK_GROUPS'].is_a?(String)\n group_list_from_env(ENV['AUTHORIZED_NETWORK_GROUPS'])\n else\n [:default]\n end\n end\n end", "def rule(rule_str, player: :any, default: nil)\n @rule_heirarchy.each do |rule_set|\n # check rule unless it doesn't apply to player\n next unless rule_set.affects? player\n # lookup rule -- move to next if rule not found\n return rule_set[rule_str, default: default] rescue next\n end\n raise RuleSet::UnknownRule, rule_str\n end", "def default\n @default = true\n end", "def default(default)\n @default = default\n self\n end", "def set_defaults\n\n end", "def set_defaults\n\n end", "def set_defaults\n\n end", "def set_defaults\n\n end", "def set_defaults\n\n end", "def set_defaults\n\n end", "def default=(_); end", "def define_suits\n [{ 'name' => 'default',\n 'run_list' => [\"recipe[#{@cookbook}::default]\"],\n 'attributes' => 'nil' }]\n end", "def rules; end", "def rules; end", "def rules; end", "def rules; end", "def rules; end", "def rules; end", "def rules; end", "def initialize(default_rules = true)\n @rules = {}\n @extension_options = {}\n if default_rules\n rule(:_) {|r,node| node.literal}\n rule(\"*\"){|r,node| r.apply(node.children)}\n end\n yield(self) if block_given?\n end", "def rules\n @rules ||= {}\n end", "def default_criteria\n []\n end", "def assign_defaults\n\t\t## WHICH OF OUR EMPLOYEES CAN RESIGN, BECOMES THE SAME AS WHO CAN VERIFY.\n\t\tself.which_of_our_employees_will_resign_outsourced_reports = self.who_can_verify_reports if self.which_of_our_employees_will_resign_outsourced_reports.blank?\n\tend", "def extended_grammar(sets)\n rules = []\n sets.each do |set|\n set.items.each do |item|\n if item.dot == 0\n rule = [item]\n next_item = item.next_item\n while next_item != nil\n rule << next_item\n next_item = next_item.next_item\n end\n rules << rule\n end\n end\n end\n rules\n end", "def rule_set(ref, val)\n rule_table.set(setup_package_id(ref), val)\n end", "def default_checks()\n return @checks\n end", "def create_default_admin_set\n rake 'hyrax:default_admin_set:create'\n end", "def default_design\n designs.select { |d| d.default? }.first\n end", "def rules\n @rules ||= {}\n end", "def rules\n @rules ||= {}\n end", "def rules\n @rules ||= {}\n end", "def default(*names)\n self.class.schema.default(self.class, names)\n end", "def default\r\n @opts[:default]\r\n end", "def get_default_action\n default = actions.map {|x| get_action(x) }.select {|x| x.default }\n if default.length > 1\n raise \"The actions #{default.map(&:name).join(\", \")} cannot all be default\"\n end\n default.first\n end", "def default\n options[:default]\n end", "def score_benchmark_default\n return 0.0 unless @groups\n\n count = 0\n cumulative_score = 0.0\n\n @groups.each do |group|\n # Default weighted scoring only provides value when more than one rule exists per group. 
This implementation\n # is not currently supporting more than one rule per group so weight need not apply.\n rule_score = score_default_rule(test_results(group.rule.id))\n\n if rule_score[:rule_count].positive?\n count += 1\n cumulative_score += rule_score[:rule_score]\n end\n end\n\n return 0.0 unless count.positive?\n\n (cumulative_score / count).round(2)\n end", "def check_frontend_defaults(default_bill=nil, default_ship=nil)\n if default_bill\n check 'default_bill'\n elsif default_bill == false\n uncheck 'default_bill'\n end\n\n if default_ship\n check 'default_ship'\n elsif default_ship == false\n uncheck 'default_ship'\n end\n end", "def choose_default_route(routes)\n routes.select do |r|\n r[:destination] == \"default\"\n end.min do |x, y|\n (x[:metric].nil? ? 0 : x[:metric].to_i) <=> (y[:metric].nil? ? 0 : y[:metric].to_i)\n end\n end", "def select_default metric;\n @select_default = metric\n end", "def required_defaults; end", "def set_default_value(opt1, opt2)\n return if @yaml[opt1][opt2].present?\n return if @record && @record[@yaml['name']].present?\n\n @yaml[opt1][opt2] = if @yaml['default'].class == Hash\n evaluate = @yaml['default']['eval']\n return if evaluate.blank?\n # add @parent if it's a method call and @parent is not present\n if evaluate[0] != evaluate[0].upcase && !evaluate.match('@parent')\n evaluate.prepend('@parent.')\n end\n eval(evaluate)\n else\n @yaml['default']\n end\nend", "def defaults\n @ordered_elements.select{|s| s.default?}\n end", "def default_collection\n Collection.new(@backend, @name, \"_default\", \"_default\")\n end", "def addDefaultGraph(uri) \n\n\t\tif uri \n\t\t self._querytext.push([\"default-graph-uri\",uri])\n\t\tend\n\tend", "def defaults\n @defaults\n end", "def default_if_not_specified(opt,default_opt)\n opt ? opt : default_opt\nend" ]
[ "0.72728306", "0.6999354", "0.6238038", "0.5906943", "0.5906943", "0.586761", "0.5863049", "0.5844489", "0.56966907", "0.5692319", "0.56261885", "0.5594931", "0.5563753", "0.55405605", "0.55262625", "0.55262625", "0.55021584", "0.547669", "0.54765284", "0.545972", "0.5412245", "0.5391125", "0.5389421", "0.53581345", "0.53570634", "0.5331954", "0.53286195", "0.53286195", "0.5324727", "0.5322474", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.5283184", "0.52531046", "0.52531046", "0.5252012", "0.5240988", "0.5238842", "0.5195607", "0.51811063", "0.51780856", "0.517308", "0.5155288", "0.5155288", "0.5154977", "0.51549", "0.5138448", "0.513353", "0.5107141", "0.50974184", "0.5092505", "0.509189", "0.509189", "0.509189", "0.509189", "0.509189", "0.509189", "0.5086201", "0.50780696", "0.50697064", "0.50697064", "0.50697064", "0.50697064", "0.50697064", "0.50697064", "0.50697064", "0.5067049", "0.5055038", "0.50489104", "0.50438976", "0.5042235", "0.50382644", "0.5033768", "0.5033671", "0.5033166", "0.5028405", "0.5028405", "0.5028405", "0.5010424", "0.5001247", "0.49863145", "0.4981735", "0.49751523", "0.49720737", "0.4971564", "0.49554124", "0.4945317", "0.49449676", "0.49433437", "0.49419355", "0.49414596", "0.49398354", "0.4924351" ]
0.5299962
30
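The ruleset document above uses a fetch-or-update registry idiom: reuse and extend an entry when the name is already known, otherwise create and register a new one. A runnable sketch of that idiom follows; the Ruleset class here is a hypothetical stand-in that only tracks its chain, not the real class from the record.

# Hypothetical Ruleset that only tracks its chain of prerequisite sets.
class Ruleset
  attr_reader :chain

  def initialize(chain)
    @chain = Array(chain)
  end

  def update(chain)
    @chain |= Array(chain)  # merge without duplicates
  end
end

RULESETS = {}

# Reuse an existing ruleset when the name is already registered,
# otherwise create and register a new one -- as in the record above.
def ruleset(name, chain)
  if RULESETS.key?(name)
    RULESETS[name].update(chain)
  else
    RULESETS[name] = Ruleset.new(chain)
  end
  RULESETS[name]
end

ruleset(:default, [:test])
ruleset(:default, [:lint])
p RULESETS[:default].chain  #=> [:test, :lint]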
Import from another file, or glob of files, relative to project root.

TODO: Should importing be relative to the importing file?

Returns nothing.
def import(*globs)
  globs.each do |glob|
    #if File.relative?(glob)
    #  dir = Dir.pwd  #session.root  #File.dirname(caller[0])
    #  glob = File.join(dir, glob)
    #end
    Dir[glob].each do |file|
      next unless File.file?(file)  # add warning
      next if @scripts.include?(file)
      @scripts << file
      module_eval(File.read(file), file)
    end
  end
end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import(glob, opts={})\n paths = []\n\n glob = glob + '**/*' if glob.end_with?('/')\n\n if from = opts[:from]\n paths = Find.path(glob, :from=>from)\n else\n if glob.start_with?('/')\n if root = lookup_root\n glob = File.join(root, glob)\n else\n raise \"no project root for #{glob}\" unless root\n end\n end\n paths = Dir.glob(glob)\n end\n\n paths = paths.select{ |path| File.file?(path) }\n paths = paths[0..0] if opts[:first]\n\n load_files(*paths)\n\n paths.empty? ? nil : paths\n end", "def import_file(path)\n return Kernel.load(path)\nend", "def import_file_path\n @import_file_path ||= real_import_file_path\n end", "def import_path\n @import_paths ||= Pathname.new(source_dir).join(data['import_path'] || './imports').to_s\n end", "def import!\n return selected_importer.import!\n end", "def require_relative(file, *args)\n paths = $:\n paths.push(File.realpath(File.absolute_path(File.dirname(caller[0][/^([^:]+)/,1]))))\n\n paths.each do |p|\n path = File.join(p, file)\n path = \"#{path}.rb\" unless path.end_with? \".rb\"\n if File.exists?(path) || File.exists?(path + \".rb\")\n file = File.realpath(path)\n break\n end\n end\n\n @@__sets[file] = :original\n\n __require_relative(file, *args)\n end", "def import(*args)\n first = args.first\n return import_all(first) if first.instance_of?(Module)\n\n opts = args.pop\n source = opts.fetch(:from)\n rename = opts.fetch(:as) { first.to_sym }\n\n return import_methods(source, args) if args.count > 1\n import_method(source, first, rename)\n end", "def import(file, kind: 'auto', **options)\n file = File.expand_path(file)\n if kind == 'auto'\n kind = Registry.guess_type(file)\n end\n if handler = Registry.import_handler_for(file, kind)\n return handler.import(file, registry: self, **options)\n else\n raise ArgumentError, \"no importer defined for #{file}, detected as #{kind}\"\n end\n end", "def import_all(path, caller_location = caller(1..1).first)\n Modulation.import_all(path, caller_location)\n end", "def import_file(file_path)\n raise NotImplementedError\n end", "def require_relative(path, from: :both)\n caller_path = caller_locations.first.path.to_s\n caller_dir = Pathname.new(caller_path).dirname.to_s\n prefix = Ruar.path_prefix.to_s\n\n # Ruar Internal File\n caller_dir = caller_dir.delete_prefix(prefix).prepend(File::SEPARATOR) if caller_dir.start_with?(prefix)\n\n resolved_path = File.expand_path(path, caller_dir)\n require(resolved_path, from: from)\n end", "def inject_import_path(path, options = {})\n path = path.to_s unless path.is_a?(String)\n if path =~ /(.*?)\\/.*/\n import_name = Regexp.last_match[1].downcase.to_sym\n if import_name == :origen || import_name == :origen_core || Origen.app.plugins.names.include?(import_name) ||\n import_name == :doc_helpers\n # Special case to allow a shortcut for this common import plugin and to also handle legacy\n # code from when it was called doc_helpers instead of origen_doc_helpers\n if import_name == :doc_helpers\n root = Origen.app(:origen_doc_helpers).root\n else\n unless import_name == :origen || import_name == :origen_core\n root = Origen.app(import_name).root\n end\n end\n if options[:type] == :template\n if import_name == :origen || import_name == :origen_core\n path.sub! 'origen', \"#{Origen.top}/templates/shared\"\n else\n if File.exist?(\"#{root}/app/templates/shared\")\n path.sub! Regexp.last_match[1], \"#{root}/app/templates/shared\"\n else\n path.sub! 
Regexp.last_match[1], \"#{root}/templates/shared\"\n end\n end\n else\n fail 'Unknown import path type!'\n end\n end\n end\n path\n end", "def import_file(filename)\n h = Oj.load_file(filename)\n import(h)\n end", "def require(glob)\n Dir.glob(File.join root_path.to_s, *glob).each do |file|\n Kernel.require file\n end\n self\n end", "def AddProjectImport(path)\n @ProjectFileLoader.load(path)\n end", "def import\n end", "def import\n end", "def import\n end", "def import\n end", "def import(file, kind = 'auto', options = {})\n file = File.expand_path(file)\n\n handler = Registry.handler_for(file, kind)\n if handler.respond_to?(:call)\n return handler.call(self, file, kind, options)\n else\n kind = handler\n end\n\n do_merge =\n if options.has_key?('merge') then options.delete('merge')\n elsif options.has_key?(:merge) then options.delete(:merge)\n else true\n end\n\n options = Registry.format_options(options)\n\n do_import(file, kind, do_merge, options)\n end", "def importToLocal(entry)\n raise RuntimeError, \"Cannot import names in global namespace\" if @currentPath.empty?\n if @currentPath.hasChild? then\n self[@root].sendImportToLocal(@currentPath.getChild,entry)\n else\n self[@root].importToLocal(entry)\n end\n end", "def load_file_path!\n @files = FilepathScanner.call(\n include_paths,\n exclude_path_regexps,\n recursive_scan: recursive_include\n )\n end", "def include_from(path)\n mod = import(path, caller(1..1).first)\n add_module_methods(mod, self)\n add_module_constants(mod, self)\n end", "def import_file(args)\n import_file = File.join(absolute_import_dir, self.file)\n unless File.exists?(import_file)\n FileUtils.mkdir absolute_import_dir unless File.directory?(absolute_import_dir)\n FileUtils.mv args[:tempfile].path, import_file\n @table_class = InverseCsvImporter.new(import_file).table_class\n end\n end", "def import( file, project, input, options = {})\n # TODO - is there a default .. XML .. ?? 
- can't think of suitable one !\n # - if no suitable default exists raise error cos no abstract in Ruby\n end", "def import\n gitpath_import = params['gitpath'] ;\n params['gitpath'] = params['gitpath'].sub(/^.*:[0-9]+/, '') ;\n end", "def import(path, dest_path='')\n # FIXME : implement\n raise(\"not implemented\")\n # if URI ...\n end", "def absolute_import_dir\n File.join(Merb.root, \"public#{relative_import_dir}\")\n end", "def import()\n # TODO\n end", "def transform_and_import\n transform_files\n import\n end", "def import!(file)\n if File.directory?(file)\n # ...\n else\n case File.extname(file)\n when '.yaml', '.yml'\n merge!(YAML.load(File.new(file)))\n else\n text = File.read(file)\n if /\\A---/ =~ text\n name = File.basename(file)\n self[name] = YAML.load(text)\n else\n name = File.basename(file)\n self[name] = text.strip\n end\n end\n end\n end", "def import_module_path(path)\n v = path.split('/')\n \"#{v[v.size-2]}/#{v[v.size-1]}\"\n end", "def include\n File.join(@root, 'include')\n end", "def load_monkfile\n file = find_in_project(\"Monkfile\")\n\n if file\n load file\n @project = File.dirname(file)\n Dir.chdir @project\n end\n end", "def imports_directory\n workspace_manager.imports_directory\n end", "def require_relative(path)\n require File.join(File.dirname(caller[0]), path.to_str)\nend", "def r(filename)\n require \"./#{filename}\"\nend", "def relative(filename)\n filename.sub(%r{#{File.expand_path(File.join(File.dirname(__FILE__), \"..\", \"src\"))}/}, \"\")\nend", "def import(arg = nil)\n set_or_return(\n :import, arg,\n :kind_of => String,\n :default => nil\n )\n end", "def import(name)\n file = \"#{config_dir}/#{name}.rb\"\n load_config(file)\n end", "def import(package)\n srcdir = package.srcdir\n if File.directory?(srcdir)\n package.isolate_errors(false) do\n if Autobuild.do_update\n perform_update(package)\n else\n if Autobuild.verbose\n puts \" not updating #{package.name}\"\n end\n return\n end\n end\n\n elsif File.exists?(srcdir)\n raise ConfigException, \"#{srcdir} exists but is not a directory\"\n else\n perform_checkout(package)\n end\n end", "def load(*globs)\n skips = globs.grep(/^-/)\n (globs - skips).each do |glob|\n glob += '.rb' if glob =~ /\\*$/\n Dir.glob(glob).sort.each do |path|\n next unless File.file? 
path\n next if skips.find {|pat| path =~ /#{pat[1..-1]}$/}\n instance_eval(File.read(path), path)\n end\n end\n end", "def import_from_local_src_dir(pkginfo, local_src_dir, pkg_target_importdir)\n Packager.info \"Preparing source dir #{pkginfo.name} from existing: '#{local_src_dir}' -- import into: #{pkg_target_importdir}\"\n if !pkginfo.importer_type || !pkginfo.importer_type == :git\n Packager.info \"Package importer requires copying into target directory\"\n FileUtils.cp_r local_src_dir, pkg_target_importdir\n else\n pkginfo.import(pkg_target_importdir)\n end\n end", "def load_imports\n while fn = @pending_imports.shift\n next if @imported.member?(fn)\n ext = File.extname(fn)\n loader = @loaders[ext] || @default_loader\n loader.load(fn)\n @imported << fn\n end\n end", "def relative_src(filename, dir=nil)\n file = expand_src filename, dir\n base = Pathname.new File.dirname path_info\n Pathname.new(file).relative_path_from(base).to_s\n end", "def load_project_ruby\r\n # Load up patches and the like for the project\r\n if File.exist?(File.join(SAF::PROJECTS, project, \"lib\")) then\r\n Dir.glob(File.join(SAF::PROJECTS, project,\r\n \"lib\", \"**\", \"*.rb\")) do |f|\r\n require_relative(f)\r\n end\r\n end\r\n\r\n # Load up page objects and steps for this project\r\n load_directory(File.join(SAF::PROJECTS, project, \"page_objects\"))\r\n load_directory(File.join(SAF::PROJECTS, project, \"features\",\r\n \"step_definitions\"))\r\n end", "def import_diff_file(file_path)\n raise NotImplementedError\n end", "def import(name)\n file = \"#{config_dir}/#{name}.rb\"\n load_config(file)\n end", "def do_import(*arguments, &block)\n if arguments.length == 0\n @plugin_import_block = block\n else\n logger \"Inside #{self.title} parser for file: #{arguments[1] rescue ''}\"\n @plugin_import_block.call(*arguments, &block)\n end\n end", "def lookup!\n $LOAD_PATH.each do |base|\n Dir[File.join(base, *file_lookup_paths)].each do |path|\n path = path.delete_prefix(\"#{base}/\")\n require path\n rescue Exception\n # No problem\n end\n end\n end", "def import_files(dir)\n Dir.glob(dir.to_s).map {|file| import_file(file)}\nend", "def module_search_path(path)\n\t\t\[email protected]_import_path(path)\n\t\tend", "def import(*files)\r\n require \"models/user_task\"\r\n require \"models/log_record\"\r\n require \"timeout\"\r\n establish! do\r\n unless( files.empty? )\r\n files.each do |file|\r\n puts \"Loading %s\" % file\r\n File.open(file, \"r:utf-8\") do |stream|\r\n import_from stream\r\n end\r\n end\r\n else\r\n begin\r\n require \"stringio\"\r\n str = Timeout::timeout(10) {\r\n $stdin.set_encoding(\"utf-8\")\r\n $stdin.read # read as utf-8, ensured by top of this file comments\r\n }\r\n puts \"Loading from stdin\"\r\n import_from StringIO.new(str,\"r:utf-8\")\r\n rescue Timeout::Error\r\n raise \"You should specify several import file\"\r\n end\r\n end\r\n end\r\n end", "def load_files_of(path)\n Dir.foreach(path) do |file|\n src = [path, file].join('/')\n \n # call yourself if file is a directory\n load_files_of(src) if File.directory?(src) and file != '.' 
and file != '..'\n \n sym = File.basename(file, '.rb').split('_').map{|el| el.capitalize}.join('').to_sym\n unless (src == __FILE__) or \n File.directory?(file) or \n !(src[-3,3] == '.rb') or \n (REQUIRE_RELATIVE.include?(file))\n autoload sym, src\n end\n end\nend", "def app_require(file)\n require File.expand_path(file)\nend", "def link_include_file(file); end", "def link_include_file(file); end", "def explicit_relative(path)\n # Paths that do not start with \"/\", \"./\", or \"../\" will be prefixed with ./\n path.sub(%r(^(?!\\.{0,2}/)), './')\n end", "def require_relative(relative_feature)\n c = caller.first\n fail \"Can't parse #{c}\" unless c.rindex(/:\\d+(:in `.*')?$/)\n file = $`\n if /\\A\\((.*)\\)/ =~ file # eval, etc.\n raise LoadError, \"require_relative is called in #{$1}\"\n end\n absolute = File.expand_path(relative_feature, File.dirname(file))\n require absolute\nend", "def import(name, options={})\n raise 'import is not implemented yet'\n end", "def import(name, options={})\n raise 'import is not implemented yet'\n end", "def load_sync_file_relative( fpath = './sync.rb' )\n load_sync_file( path_relative_to_caller( fpath, caller ) )\n end", "def import\n files.each {|song_filename| Song.new_by_filename(song_filename)}\n end", "def gem_expand_file(rel_file)\n if File.exist?(a=File.join(GEM_ROOT, rel_file))\n File.expand_path(a)\n else\n raise Errno::ENOENT, rel_file\n end\n end", "def pyfrom(name)\n Import.from(name)\n end", "def cmd_db_import(*args)\n\t\t\treturn unless active?\n\t\t\tif (args.include?(\"-h\") or not (args and args.length > 0))\n\t\t\t\tprint_error(\"Usage: db_import <filename> [file2...]\")\n\t\t\t\tprint_line\n\t\t\t\tprint_line(\"filenames can be globs like *.xml, or **/*.xml which will search recursively\")\n\t\t\t\treturn\n\t\t\tend\n\t\t\targs.each { |glob|\n\t\t\t\tfiles = Dir.glob(File.expand_path(glob))\n\t\t\t\tif files.empty?\n\t\t\t\t\tprint_error(\"No such file #{glob}\")\n\t\t\t\t\tnext\n\t\t\t\tend\n\t\t\t\tfiles.each { |filename|\n\t\t\t\t\tif (not File.readable?(filename))\n\t\t\t\t\t\tprint_error(\"Could not read file #{filename}\")\n\t\t\t\t\t\tnext\n\t\t\t\t\tend\n\t\t\t\t\tbegin\n\t\t\t\t\t\tframework.db.import_file(:filename => filename) do |type,data|\n\t\t\t\t\t\t\tcase type\n\t\t\t\t\t\t\twhen :filetype\n\t\t\t\t\t\t\t\tprint_status(\"Importing '#{data}' data\")\n\t\t\t\t\t\t\twhen :address\n\t\t\t\t\t\t\t\tprint_status(\"Importing host #{data}\")\n\t\t\t\t\t\t\twhen :msfx_loot\n\t\t\t\t\t\t\t\tprint_status(\"Importing loot #{data}\")\n\t\t\t\t\t\t\twhen :msfx_task\n\t\t\t\t\t\t\t\tprint_status(\"Importing task #{data}\")\n\t\t\t\t\t\t\twhen :msft_report\n\t\t\t\t\t\t\t\tprint_status(\"Importing report #{data}\")\n\t\t\t\t\t\t\tend\n\t\t\t\t\t\tend\n\t\t\t\t\t\tprint_status(\"Successfully imported #{filename}\")\n\t\t\t\t\trescue DBImportError\n\t\t\t\t\t\tprint_error(\"Failed to import #{filename}: #{$!}\")\n\t\t\t\t\t\telog(\"Failed to import #{filename}: #{$!.class}: #{$!}\")\n\t\t\t\t\t\tdlog(\"Call stack: #{[email protected](\"\\n\")}\", LEV_3)\n\t\t\t\t\t\tnext\n\t\t\t\t\tend\n\t\t\t\t}\n\t\t\t}\n\t\tend", "def import(module_name, options = nil)\n\t\t\tmodule_name = @module_name + module_name if module_name =~ /^#/\n\t\t\tmodule_name = @project.resolve_module_alias( module_name )\n\t\t\[email protected]_module_options(module_name, options) if options\n\t\t\t@current_context.add_import(@project.add_import(module_name, self,false))\n\t\t\tsource_module_name = ( @module_name || @project.to_s ) + '#' + 
@current_section_name\n\t\t\t@importlist << [ module_name, source_module_name ]\n\t\tend", "def import\n files.each{|f| Song.new_by_filename(f)}\n end", "def load(glob)\n b = binding\n Dir[glob].each do |f|\n eval(File.read(f), b, f)\n end\n end", "def ovl_expand_file(rel_file)\n if File.exist?(rel_file)\n File.expand_path(rel_file)\n elsif File.exist?(a=File.join(GEM_ROOT, rel_file))\n File.expand_path(a)\n else\n raise Errno::ENOENT, rel_file\n end\n end", "def ovl_expand_file(rel_file)\n if File.exist?(rel_file)\n File.expand_path(rel_file)\n elsif File.exist?(a=File.join(GEM_ROOT, rel_file))\n File.expand_path(a)\n else\n raise Errno::ENOENT, rel_file\n end\n end", "def require_real_file(file)\n return unless file\n require File.realpath(file)\n end", "def import\n files.each{|file| Song.new_by_filename(file)}\n end", "def specimport(path, options = {})\n if options[:inline]\n @read_spec = path\n else\n self.specfile = path\n end\n specdata = load_spec()\n import_specification(specdata)\n\n problem\n end", "def import\n files.each {|filename| Song.new_by_filename(filename)}\n end", "def import(another)\n merge! configs.import(another)\n end", "def wrong; require 'unknown_file'; end", "def import\n files.each do |filename|\n Song.new_by_filename(filename)\n end\n end", "def add_import_path(current_string, builder_mode)\n adjusted_import_path = if @import_path.present?\n path_has_leading_slash = @import_path[0] == '/'\n log_hint(BuilderHints::IMPORT_PATH_LEADING) if path_has_leading_slash\n builder_mode == BuilderMode::EXECUTE && path_has_leading_slash ? @import_path[1..-1] : @import_path\n else\n @import_path\n end\n add_if_present(@import_path, current_string, \" -import-path '#{adjusted_import_path}' \")\n end", "def import!(options = {})\n fail \"#{self.name} is not importable\" unless importable?\n\n import_object = options.fetch(:object, nil)\n execute = options.fetch(:execute, true)\n import_file = get_import_file(import_object, options)\n\n call_dispatcher(import_object, execute, import_file)\n end", "def load_glob(glob)\n Dir[glob].each { |f| load f }\n end", "def require_p(path)\n require join(project_dir,path)\n end", "def process_imports(body, context, directory)\n body.scan(IMPORT_PATTERN) do |import_tag|\n if path = import_tag[HREF_PATH_PATTERN, :path]\n absolute_path = File.absolute_path(path, directory)\n context.require_asset(absolute_path)\n end\n end\n\n body.gsub(IMPORT_PATTERN, \"\")\n end", "def import\n self.files.each{|file| Song.new_by_filename(file)}\n end", "def import\n files.each{ |f| Song.create_from_filename(f) }\n end", "def import\n files.each do |e|\n Song.create_from_filename(e)\n end\n end", "def import\n files.each {|file_name| Song.new_by_filename(file_name)}\n end", "def read_relative_file(name, from=__FILE__)\n File.read(File.join(File.dirname(from), name))\n end", "def initiate_import!\n ROOT_FETCHERS.each do |importer|\n importer.new.import!\n end\n end", "def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend", "def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend", "def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend", "def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend", "def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend", "def source_paths\n Array(super) +\n [File.expand_path(File.dirname(__FILE__))]\nend", "def import_folder! 
folder\n end", "def import_from_csv\n puts 'Which file would you like to import?'\n file_path = gets.chomp\n CsvImporter.new(@database, file_path).import\n puts 'Import complete.'\n rescue FileNotFoundError\n puts 'The specified file was not found.'\n rescue CsvInvalidError => e\n puts e.message\n end", "def process (file, original_path =nil)\n log \"-- #{file} ...\"\n for import in get_imports(file) do\n path = import.to_s\n unless path.start_with?(\"/usr/lib/\") || path.start_with?(\"/System/\")\n dst = copy_lib(import, (original_path || file))\n unless dst.absolute?\n dst = '@loader_path/' + dst.relative_path_from(file.dirname).to_s\n end\n change_import(file.to_s, import.to_s, dst.to_s)\n end\n end\n log \"\\tend #{file}\"\nend", "def engine_from_glob(glob, base_path, options)\n context = options[:custom][:sprockets_context]\n imports = resolve_glob(context, glob, base_path).inject('') do |imports, path|\n context.depend_on path\n relative_path = path.relative_path_from Pathname.new(base_path).dirname\n imports << %(@import \"#{relative_path}\";\\n)\n end\n return nil if imports.empty?\n ::Sass::Engine.new imports, options.merge(\n :filename => base_path.to_s,\n :syntax => :scss,\n :importer => self\n )\n end" ]
[ "0.7323573", "0.65813524", "0.6214822", "0.59600264", "0.58899266", "0.577969", "0.57250607", "0.568822", "0.5681246", "0.56758887", "0.5617326", "0.5614111", "0.5596892", "0.55946434", "0.55924934", "0.5525471", "0.5525471", "0.5525471", "0.5525471", "0.5494579", "0.5488046", "0.5448669", "0.5420472", "0.54103804", "0.53730226", "0.53507334", "0.5348414", "0.53353673", "0.5334082", "0.5328725", "0.5291881", "0.52590525", "0.5241835", "0.5205292", "0.5195641", "0.516112", "0.5150052", "0.51469743", "0.51459885", "0.5128178", "0.5124124", "0.5101584", "0.50958383", "0.50944376", "0.50874346", "0.5072224", "0.5071807", "0.5059831", "0.5054015", "0.5040777", "0.5028556", "0.5019334", "0.50148755", "0.50047785", "0.4991973", "0.49877307", "0.49877307", "0.4983785", "0.49690875", "0.4968117", "0.4968117", "0.49672526", "0.4966173", "0.4959109", "0.49558046", "0.4953808", "0.49535525", "0.49508056", "0.49496716", "0.494884", "0.494884", "0.49224877", "0.492139", "0.49106106", "0.48876742", "0.48870578", "0.4883648", "0.4878565", "0.48716462", "0.4862455", "0.48576823", "0.48573464", "0.48557332", "0.48556733", "0.48438266", "0.48426688", "0.4842579", "0.4841029", "0.4840556", "0.48320594", "0.48320594", "0.48320594", "0.48320594", "0.48320594", "0.48320594", "0.48296338", "0.48255935", "0.48201808", "0.48182917" ]
0.67391557
2
Add paths to be ignored in file rules. globs - List of file globs. [Array] Returns [Array]
def ignore(*globs)
  @ignore.concat(globs) unless globs.empty?
  @ignore
end
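As illustration only, a minimal usage sketch of the ignore accessor above; the FileRules host class and its initializer are hypothetical, and the only assumption is an @ignore array set up elsewhere:

class FileRules
  def initialize
    @ignore = []   # accumulated ignore patterns
  end

  # Same accessor as the document above: append when globs are given,
  # always return the current list.
  def ignore(*globs)
    @ignore.concat(globs) unless globs.empty?
    @ignore
  end
end

rules = FileRules.new
rules.ignore('tmp/**/*', '*.log')   # => ["tmp/**/*", "*.log"]
rules.ignore                        # => ["tmp/**/*", "*.log"] (read-only call)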
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ignore!(*globs)\n @ignore.replace(globs)\n @ignore\n end", "def ignored\n [\n '.agignore',\n '.cvsignore',\n '.gitignore',\n '.hgignore',\n ].map do |file_with_ignore_patterns|\n if File.exist? file_with_ignore_patterns\n patterns = File.read(file_with_ignore_patterns).split(\"\\n\")\n patterns.map do |pattern|\n next if pattern =~ /^#/\n next if pattern =~ /^\\s*$/\n \"-not \\\\( -path \\\"*#{pattern}*\\\" -prune \\\\)\"\n end.compact.join(' ')\n else\n ''\n end\n end.join(' ') + [\n \"-not \\\\( -path \\\"*\\\\.git*\\\" -prune \\\\)\"\n ].join(' ')\nend", "def ignore_files=(patterns)\n @finder.add_patterns(patterns)\n end", "def ignores(*file_paths)\n self.ignore_paths += file_paths\n end", "def process_exclusions(globs); end", "def ignore(*globs)\n @watchlist.ignore(globs) unless globs.empty?\n end", "def process_globs globs\n result = globs.flat_map do |glob|\n Dir[File.join directory, glob]\n .map{ |f| f.gsub(/\\\\/, '/') }\n .select { |f| File.file?(f) }\n end\n result\n end", "def ignore_paths(options)\n source = options['source']\n destination = options['destination']\n config_files = Configuration[options].config_files(options)\n paths = config_files + Array(destination)\n ignored = []\n\n source_abs = Pathname.new(source).expand_path\n paths.each do |p|\n path_abs = Pathname.new(p).expand_path\n begin\n rel_path = path_abs.relative_path_from(source_abs).to_s\n ignored << Regexp.new(Regexp.escape(rel_path)) unless rel_path.start_with?('../')\n rescue ArgumentError\n # Could not find a relative path\n end\n end\n ignored\n end", "def hg_ignore_files( *pathnames )\n\t\t\tpatterns = pathnames.flatten.collect do |path|\n\t\t\t\t'^' + Regexp.escape(path) + '$'\n\t\t\tend\n\t\t\ttrace \"Ignoring %d files.\" % [ pathnames.length ]\n\n\t\t\tIGNORE_FILE.open( File::CREAT|File::WRONLY|File::APPEND, 0644 ) do |fh|\n\t\t\t\tfh.puts( patterns )\n\t\t\tend\n\t\tend", "def ignored_files\n all_files.select { |f| ignore_matcher.matched?(f) }\n end", "def ignore!(*globs)\n @watchlist.ignore!(globs)\n end", "def amass(include_globs, exclude_globs=[], ignore=[])\n include_globs = include_globs.map{ |f| File.directory?(f) ? File.join(f, '**/*') : f } # Recursive!\n exclude_globs = exclude_globs.map{ |f| File.directory?(f) ? File.join(f, '**/*') : f } # Recursive!\n include_files = include_globs.flatten.map{ |g| Dir.glob(g) }.flatten.uniq\n exclude_files = exclude_globs.flatten.map{ |g| Dir.glob(g) }.flatten.uniq\n files = include_files - exclude_files\n files = files.reject{ |f| ignore.any?{ |x| File.fnmatch?(x, File.basename(f)) } }\n files\n end", "def ignore_files_with_regexps(files, regexps)\n files.select { |f| regexps.all? { |r| r.match(f.file).nil? 
} }\n end", "def appendGlobs(arr, patterns); patterns.each { |p| arr << Dir.glob(p) }; arr.flatten end", "def glob(path, options = {})\n path = File.expand_path(path)\n if File.file?(path) || options[:no_recurse] == true\n [path]\n else\n [path] + Dir.glob(\"#{path}/**/*\", File::FNM_DOTMATCH).reject{|f| IMPLIED_DIR_ENTRIES =~ f }\n end\n end", "def no_extension_files(base_dir, wildcard, non_exts = [])\n list = []\n unless non_exts.empty?\n list = Dir.glob(File.join(base_dir, wildcard, \"{#{non_exts.join(',')}}\"))\n end\n list\n end", "def process_exclusions globs\n remainder = globs.select do |glob|\n if glob_is_directory?(glob)\n exdir = File.join(directory, glob_to_directory(glob))\n included.delete_if { |file| file.start_with?(exdir) }\n false\n else\n true\n end\n end\n process_globs remainder\n end", "def ignore(*paths)\n paths.each do |path|\n if path.is_a? Array\n config.ignore.concat path\n else\n config.ignore << path\n end\n end \n end", "def excluded_files() = []", "def ignore_assets\n ret = []\n ret << [\"assets/styles/atoms/flex\", \"assets/styles/atoms/flex/*\"]\n ret << [\"assets/styles/styleguide\", \"assets/styles/styleguide/**/*\"]\n ret << \"assets/styles/**/*.liquid\"\n ret << \"assets/styles/**/*.css\"\n ret << \"assets/styles/*.scss\"\n ret\n end", "def glob(*patterns)\n require \"pathname\"\n patterns.reduce([]) do |result, pattern|\n if pattern.end_with?(\"/**\")\n pattern += \"/\"\n end\n result += Dir.glob(pattern).map do |path|\n Pathname.new(path.gsub(\"\\\\\", \"/\")).cleanpath.to_s\n end\n end.sort\n end", "def excludes(*paths)\n self.excluded_files.concat(expand_globs(paths))\n end", "def files\n @files ||= Dir.glob(File.join(@root, '**/*'), GLOB_FLAGS).select do |path|\n File.file?(path) && path !~ IGNORE_REGEX\n end\n end", "def ignore(*patterns)\n @ignore_patterns += patterns\n end", "def apply_exclusions paths\n paths.reject do |path|\n excludes.any? do |pattern|\n globs = pattern.split(\"\\\\\")\n components = path.split(\"/\")\n\n # Inno Setup includes a feature in which you can anchor excludes at\n # the root by starting the exclude with a \"\\\". Since I don't want\n # to make this more complicated than I have to, I'm not implementing\n # this feature at this time.\n if globs[0] == \"\"\n raise \"Can't handle anchored exclude #{pattern}\"\n end\n \n globs_match_strings_anywhere? globs, components\n end\n end\n end", "def dont_reload(*glob)\n Dir[*glob].each { |path| Watcher::List.for(self).ignore(path) }\n end", "def files() = files_path.glob('**/*')", "def glob(*globs)\n files.find_all do |path|\n globs.any? { |pattern| File.fnmatch(pattern, path, File::FNM_EXTGLOB) }\n end\n end", "def git_ignore(*paths)\n paths.each {|path| append_to_file '.gitignore', \"#{path}\\n\"}\n end", "def process_globs(globs); end", "def ignore!(*regexps)\n directories_records.each { |r| r.ignore!(*regexps) }\n self\n end", "def ignore(*regexps)\n directories_records.each { |r| r.ignore(*regexps) }\n self\n end", "def file_list(pattern)\n FileList[pattern].tap do |list|\n list.exclude 'vendor/**/*', # bundler\n 'pkg/**/*', # gem build process\n 'spec/fixtures/**/*' # puppetlabs fixtures\n list.reject! { |f| File.directory? f }\n end\n end", "def excludes\n return Dir.chdir( base ) {\n %w(. .. 
.svn .git .hg CVS Rakefile) + files_for_erb +\n Dir.glob( '*~' ) + Dir.glob( '#*#' ) + Dir.glob( '*.bak' )\n }\n end", "def ignore_includes\n ret = []\n ret << \"_includes/jekyll/**/*\"\n ret << [\"_includes/styleguide\", \"_includes/styleguide/**/*\"]\n ret << \"_includes/*.html\"\n ret << [\"_includes/atoms/figure\", \"_includes/atoms/figure/*\"]\n ret << [\"_includes/atoms/sanitize.html\", \"_includes/atoms/imagetitle.html\", \"_includes/atoms/classname.html\"]\n ret\n end", "def ignore_paths\n Dir.glob(\"**/*\").select { |f| File.directory? f }\n .collect { |name| \"#{name}/\" }\n - [\"app/\",\n \"app/views/\",\n \"app/views/branded/\",\n \"app/views/branded/public_pages/\",\n \"app/views/branded/home/\",\n \"app/views/branded/contact_us/\",\n \"app/views/branded/contact_us/contacts/\",\n \"app/views/branded/shared/\",\n \"app/views/branded/layouts/\",\n \"app/views/branded/static_pages/\"]\nend", "def exclude_ignored_paths(dirs, ignore_paths = self.ignore_paths)\n Dir.glob(dirs.map { |d| \"#{d.sub(%r{/+$}, '')}/*\" }, File::FNM_DOTMATCH).reject do |path|\n ignore_paths.include?(File.basename(path))\n end\n end", "def ignored_files=(_arg0); end", "def filter(files)\n ruleset.watchlist.filter(files)\n #case ruleset.ignore\n #when Ignore\n # ruleset.ignore.filter(list)\n #when Array\n # list.reject!{ |path| ignore.any?{ |ig| /^#{ig}/ =~ path } }\n #else\n # list\n #end\n end", "def svn_ignore_files( *pathnames )\n\tpathnames.flatten!\n\n\tmap = pathnames.inject({}) do |map,path|\n\t\tmap[ path.dirname ] ||= []\n\t\tmap[ path.dirname ] << path.basename\n\t\tmap\n\tend\n\n\ttrace \"Ignoring %d files in %d directories.\" % [ pathnames.length, map.length ]\n\n\tmap.each do |dir, files|\n\t\ttrace \" %s: %p\" % [ dir, files ]\n\t\tio = open( '|-' ) or exec 'svn', 'pg', 'svn:ignore', dir\n\t\tignorelist = io.read.strip\n\t\tignorelist << \"\\n\" << files.join(\"\\n\")\n\t\tsystem 'svn', 'ps', 'svn:ignore', ignorelist, dir\n\tend\nend", "def clean_paths\n cached_used = used_files\n glob_options = File::FNM_DOTMATCH | File::FNM_CASEFOLD\n files = Pathname.glob(root + \"**/*\", glob_options).map(&:to_s)\n\n files.reject! do |candidate|\n candidate = candidate.downcase\n candidate.end_with?('.', '..') || cached_used.any? do |path|\n path = path.downcase\n path.include?(candidate) || candidate.include?(path)\n end\n end\n files\n end", "def safe_glob(dir, patterns, flags = T.unsafe(nil)); end", "def addSrcFilesByRE(re)\n Dir.for_each(@srcDir) { |f|\n next if File.stat(f).dir?\n @files << f if re =~ f\n }\n end", "def exclude_paths\n @source_paths ||= []\n end", "def find_files_without(source_path, ignored_paths, end_with = \"*.rb\")\n files_paths = Dir.glob(\"./#{source_path}/**/#{end_with}\")\n files_paths.select do |file_path|\n ignored_paths.map do |path|\n file_path.include?(\"/#{path}/\")\n end.none?\n end\n end", "def prepare_exception_globs(exceptions_file, source_dir)\n source_path = File.expand_path(source_dir)\n globs = []\n File.open(exceptions_file) do |file|\n file.each do |line|\n glob = line.strip\n if (glob.length > 0) && (glob[0..0] != '#')\n globs << \"#{source_path}/#{glob}\".gsub('//', '/')\n end\n end\n end\n return globs\n end", "def generate_glob_filter(glob)\n # Negative glob starts with '!'\n negative = glob.start_with?('!')\n # Strip leading '!' 
then\n glob.remove_prefix!('!') if negative\n lambda do |path|\n matches = File.fnmatch(glob, path, GLOB_MATCH_MODE)\n # inverse match if glob is negative\n matches = !matches if negative\n matches\n end\n end", "def load_file_exclusions\n return unless config_hash[\"Exclude Files\"]\n config_options[:excluded_files] = []\n config_hash[\"Exclude Files\"].each do |short_file|\n config_options[:excluded_files] << File.join(starting_path, short_file)\n end\n end", "def add_matching(pattern)\n Dir[ pattern ].each do |fn|\n self << fn unless exclude?(fn)\n end\n end", "def except(files,patterns)\n if !patterns.kind_of?(Array)\n patterns = [patterns]\n end\n files.select do |file|\n matches = true\n patterns.each do |pattern|\n if File.fnmatch(pattern,file)\n matches = false\n break\n end\n end\n matches\n end\n end", "def collect_paths(*paths)\n raw = [] # all paths and globs\n plus = Set.new # all paths to expand and add\n minus = Set.new # all paths to remove from plus set\n\n # assemble all globs and simple paths, reforming our glob notation to ruby globs\n paths.each do |paths_container|\n case (paths_container)\n when String then raw << (FilePathUtils::reform_glob(paths_container))\n when Array then paths_container.each {|path| raw << (FilePathUtils::reform_glob(path))}\n else raise \"Don't know how to handle #{paths_container.class}\"\n end\n end\n\n # iterate through each path and glob\n raw.each do |path|\n\n dirs = [] # container for only (expanded) paths\n\n # if a glob, expand it and slurp up all non-file paths\n if path.include?('*')\n # grab base directory only if globs are snug up to final path separator\n if (path =~ /\\/\\*+$/)\n dirs << FilePathUtils.extract_path(path)\n end\n\n # grab expanded sub-directory globs\n expanded = @file_wrapper.directory_listing( FilePathUtils.extract_path_no_aggregation_operators(path) )\n expanded.each do |entry|\n dirs << entry if @file_wrapper.directory?(entry)\n end\n\n # else just grab simple path\n # note: we could just run this through glob expansion but such an\n # approach doesn't handle a path not yet on disk)\n else\n dirs << FilePathUtils.extract_path_no_aggregation_operators(path)\n end\n\n # add dirs to the appropriate set based on path aggregation modifier if present\n FilePathUtils.add_path?(path) ? 
plus.merge(dirs) : minus.merge(dirs)\n end\n\n return (plus - minus).to_a.uniq\n end", "def ignore(*paths)\n @to_copy -= paths.flatten\n end", "def files_to_analyze\n require 'find'\n ignore_dirs = ['.git','bin','test','assets','lib','log','vendor','tmp','img', 'images', 'uploads', 'fonts']\n ignore_files = Regexp.union(/^\\..*$/i, /^.*(.md)$/i, /^.*(.json)$/i, /^.*(.yml)$/i, /^.*(.log)$/i, /^.*(.png)$/i, /^.*(.jpg)$/i, /^.*(.jpeg)$/i)\n final_files = []\n # for every file in repository - keep the files to process\n Find.find('.') do |path|\n path_name = File.basename(path)\n if FileTest.directory?(path)\n if ignore_dirs.include?(path_name)\n Find.prune\n else\n next\n end\n else\n if path_name.match(ignore_files)\n next\n else\n path.gsub!(/^\\.\\//, '')\n final_files.push(path)\n end\n end\n end\n return final_files\n end", "def add_matching(pattern)\n Dir[pattern].each do |fn|\n\tself << fn unless exclude?(fn)\n end\n end", "def exclude_paths(paths=nil)\n if !instance_variable_defined?(:@exclude_paths)\n @exclude_paths = []\n end\n (paths || []).each do |path|\n @exclude_paths << /^(\\w+\\:\\/\\/[^\\/]+\\/?)?#{path.to_s}$/\n end\n @exclude_paths\n end", "def ignoring\n %w{*_test.lua *_spec.lua .*}\n end", "def all_files_except_git\n Dir.glob('*', File::FNM_DOTMATCH).delete_if { |file| file =~ /\\A\\.{1,2}\\z|\\A\\.git\\z/ }\n end", "def ignore_orphan_files_from(paths_to_ignore)\n @ignored_orphan_files_paths << {\n ignored_paths: paths_to_ignore.is_a?(Array) ? paths_to_ignore : [paths_to_ignore],\n nodes_selectors_stack: current_nodes_selectors_stack\n }\n end", "def tracked_files\n all_files.reject { |f| ignore_matcher.matched?(f) }\n end", "def file_paths\n Dir.glob(@filepath_pattern).sort\n end", "def load(*globs)\n skips = globs.grep(/^-/)\n (globs - skips).each do |glob|\n glob += '.rb' if glob =~ /\\*$/\n Dir.glob(glob).sort.each do |path|\n next unless File.file? 
path\n next if skips.find {|pat| path =~ /#{pat[1..-1]}$/}\n instance_eval(File.read(path), path)\n end\n end\n end", "def file_paths\n src_file_mask = File.join(@folder_path, '**', '*.c')\n @file_paths = Dir.glob(src_file_mask)\n return @file_paths\n end", "def select_default_ignore_patterns\n\t@exclude_patterns = DEFAULT_IGNORE_PATTERNS.dup\n end", "def exclude(*files)\n files = to_artifacts(files)\n @excludes |= files\n @excludes |= files.reject { |f| f =~ /\\*$/ }.map { |f| \"#{f}/*\" }\n self\n end", "def add_matching(pattern)\n self.class.glob(pattern).each do |fn|\n self << fn unless excluded_from_list?(fn)\n end\n end", "def file_glob\n if file_types.nil?\n '*'\n else\n \"*{#{file_types.join(',')}}\"\n end\n end", "def glob\n \"**/*\"\n end", "def exclude_files(files, pwd)\n Dir.chdir(pwd)\n exclusions = @engine_config['exclude_paths'] || []\n files.reject { |f| exclusions.include?(f) }\n end", "def declare_chefignore_patterns\n @flavor.class.do_declare_resources do\n chefignore_patterns << 'Guardfile' if snippet?('standard_ignore')\n end\n end", "def getFiles(theArgs)\n\n\ttheFiles = [];\n\tpathsExclude = theArgs[:exclude];\n\n\ttheArgs[:paths].each do |pathRoot|\n\t\n\t\tif (File.exist?(pathRoot))\n\t\t\tFind.find(pathRoot) do |thePath|\n\t\t\t\tif (File.file?(thePath))\n\n\t\t\t\t\tif (!pathsExclude.include?(thePath))\n\t\t\t\t\t\tif (!FILES_EXCLUDE.include?(File.basename(thePath)))\n\t\t\t\t\t\t\ttheFiles << thePath;\n\t\t\t\t\t\tend\n\t\t\t\t\tend\n\n\t\t\t\tend\n\t\t\tend\n\t\telse\n\t\t\tputs \"Skipping #{pathRoot}, file not found\";\n\t\tend\n\tend\n\n\treturn theFiles;\n\nend", "def glob(*patterns)\n selection = []\n patterns.each do |pattern|\n selection.concat(Dir.glob(File.join(path, pattern))) \n end\n selection\n end", "def all_paths\n paths = Dir[\"#{@path}/*\"]\n paths = paths.select(&matcher(options[:accept])) if options[:accept]\n paths = paths.reject(&matcher(options[:reject])) if options[:reject]\n paths\n end", "def glob(pattern, &block)\n result = @lock.with_read_lock { @paths.flat_map { |path| path.glob(pattern, &block) }}\n result unless block\n end", "def select_default_ignore_patterns\n @exclude_patterns = DEFAULT_IGNORE_PATTERNS.dup\n end", "def ignore_exts\n @ext_rules.reject\n end", "def files(folder, ignore)\n all = Dir.glob(folder + \"/**/*\")\n ignore.each {|i| all = all - Dir.glob(i)}\n \n all\n end", "def add_extra_files(*files)\n files.map! {|f| f.include?(\"*\") ? 
Dir.glob(f) : f }.flatten!\n files.each do |file|\n raise Errno::ENOENT, \"Could not find extra file: #{file}\" unless File.file?(file)\n options[:files] << file\n end\n end", "def matches_for_glob(glob) # TODO: rename?\n glob = File.join(self.lib_dirs_glob, glob)\n\n Dir[glob].map {|f| f.tap(&Gem::UNTAINT) } # FIX our tests are broken, run w/ SAFE=1\n end", "def file_patterns\n [@file_patterns].flatten.compact.uniq\n end", "def result_paths( pattern = '*' )\n paths = nil\n Dir.chdir(dir) {\n paths = Dir.glob(pattern).map {|x| Pathname.new(x).expand_path }\n }\n # remove directories of Analysis\n anl_dirs = analyses.map {|anl| /^#{anl.dir.to_s}/ }\n paths.reject do |path|\n anl_dirs.find {|anl_dir| anl_dir =~ path.to_s }\n end\n end", "def ignore(regexps)\n @options[:ignore] = [options[:ignore], regexps]\n registry[:silencer] = Silencer.new(self)\n end", "def declare_chefignore_patterns\n @flavor.class.do_declare_resources do\n if snippet?('standard_ignore')\n %w(\n Gemfile Gemfile.lock Rakefile Berksfile Berksfile.lock\n ).each do |e|\n chefignore_patterns << e\n end\n end\n end\n end", "def expound_paths(*entries)\n paths = []\n\n entries.each do |entry|\n entry = entry.strip\n\n next if entry.empty?\n next if entry.start_with?('#')\n\n if File.directory?(entry)\n if library_path?(entry)\n paths << entry\n else\n if File.directory?(File.join(entry, 'gems'))\n subpaths = Dir.glob(File.join(entry, 'gems/*/'))\n else\n subpaths = Dir.glob(File.join(entry, '*/'))\n end\n subpaths.each do |subpath|\n paths << subpath if library_path?(subpath)\n end\n end\n elsif File.file?(entry)\n paths.concat(expound_paths(*File.readlines(entry)))\n else\n glob_paths = Dir.glob(entry)\n if glob_paths.first != entry\n paths.concat(expound_paths(*glob_paths))\n end\n end\n end\n\n paths\n end", "def exclude(*files)\n @paths[''].exclude *files\n self\n end", "def add_files(*files)\n @source_paths &= files\n end", "def monitored_paths\n paths = Dir['**/*'].select do |path|\n @script.patterns.any? {|p| path.match(p) }\n end\n paths.push(@script.path).compact!\n paths.map {|path| Pathname(path).expand_path }\n end", "def traverse_files\n result = []\n paths = config[:paths].select { |p| File.exist?(p) }\n if paths.empty?\n log_warn \"search.paths #{config[:paths].inspect} do not exist\"\n return result\n end\n Find.find(*paths) do |path|\n is_dir = File.directory?(path)\n hidden = File.basename(path).start_with?('.')\n not_incl = config[:include] && !path_fnmatch_any?(path, config[:include])\n excl = path_fnmatch_any?(path, config[:exclude])\n if is_dir || hidden || not_incl || excl\n Find.prune if is_dir && (hidden || excl)\n else\n result << yield(path)\n end\n end\n result\n end", "def clear_ignore_patterns\n\t@exclude_patterns = [ /^$/ ]\n end", "def expanded_paths(patterns, options = {})\n return [] if patterns.empty?\n path_list.glob(patterns, options).flatten.compact.uniq\n end", "def files\n @files = Dir[File.join(root(:site), '**', '*')].inject([]) do |a, match|\n # Make sure its the canonical name\n path = File.expand_path(match)\n file = path.gsub /^#{Regexp.escape root(:site)}\\/?/, ''\n ext = File.extname(file)[1..-1]\n \n if ignored_files.include?(path) or File.directory?(match)\n # pass\n elsif not get_renderer(ext).nil? 
# Has a renderer associated\n fname = file.chomp(\".#{ext}\")\n fname += get_renderer(ext).default_ext unless File.basename(fname).include?('.')\n a << fname\n else\n a << file\n end\n a\n end\n end", "def exclude(*files)\n @exclude += files.flatten\n self\n end", "def filter_matched_files\n matched_files = []\n\n unless file_extensions.empty?\n extensions = file_extensions.reduce do |total, extension|\n total + \"|\" + extension.downcase\n end\n extensions_regex = \"^(.+\" + extensions + \")$\"\n (git.modified_files + git.added_files).each do |file|\n matched_files += [file] unless file.downcase.match(extensions_regex).nil?\n end\n end\n\n unless file_patterns.empty?\n (git.modified_files + git.added_files).each do |line|\n file_patterns.each do |pattern|\n matched_files += [line] unless line.downcase.match(pattern.downcase).nil?\n end\n end\n end\n\n return [matched_files].flatten.compact\n end", "def whitelist_files\n @whitelist_files ||= []\n end", "def glob\n case @raw_image_files.first.filename\n when /^E.*dcm$/\n return 'E*.dcm'\n when /\\.dcm$/\n return '*.dcm'\n when /^I\\./\n return 'I.*'\n when /^I/\n return 'I*.dcm'\n when /.*\\.\\d{3,4}/\n return '*.[0-9]*'\n when /\\.0/\n return '*.0*'\n else\n return nil\n end\n # Note - To exclude just yaml files we could also just use the bash glob\n # '!(*.yaml), but we would have to list all exclusions. This may turn\n # out easier in the long run.\n end", "def ignore!(regexps)\n @options.delete(:ignore)\n @options[:ignore!] = regexps\n registry[:silencer] = Silencer.new(self)\n end", "def file(glob)\n \"**/#{glob}\"\n end", "def excluded_files\n # TODO: also append files marked as %{exclude} (or handle elsewhere?)\n missing_files_for(upstream_gem)\n end", "def files\n @files ||= lambda {\n sorted_relevant_files = []\n\n file_globs.each do |glob|\n current_glob_files = Pathname.glob(glob)\n relevant_glob_files = relevant_files & current_glob_files\n\n relevant_glob_files.map! do |file|\n File.new(path: file,\n namespaces: namespaces,\n decryption_keys: decryption_keys,\n encryption_keys: encryption_keys,\n signature_name: signature_name)\n end\n\n sorted_relevant_files += relevant_glob_files\n end\n\n sorted_relevant_files.uniq\n }.call\n end", "def files( env )\n return env[:files] if env.include? :files\n raise(ArgumentError, \"env hash must include either :files or :path\") unless env.include? :path\n\n file_glob = self.class.class_eval { @file_pattern } || '*'\n Dir.glob(File.join(env[:path], '**', file_glob))\n end", "def glob(pattern, flags = T.unsafe(nil)); end" ]
[ "0.7524156", "0.72494334", "0.7089483", "0.70458", "0.69992214", "0.6967921", "0.6935916", "0.69272983", "0.69156355", "0.6856292", "0.68418306", "0.6792441", "0.67896205", "0.66566366", "0.6635084", "0.66258883", "0.65988934", "0.65887755", "0.65515596", "0.6526204", "0.64827645", "0.64544165", "0.6410247", "0.6401265", "0.6397548", "0.63891935", "0.6343267", "0.63284075", "0.63255805", "0.6322002", "0.6306643", "0.6281055", "0.6278423", "0.6276156", "0.62616754", "0.6220604", "0.6176166", "0.61533755", "0.6142175", "0.612291", "0.61132836", "0.61023694", "0.6086168", "0.6063473", "0.6050906", "0.60313743", "0.6028098", "0.6016035", "0.597557", "0.5957746", "0.5955327", "0.5936535", "0.59273964", "0.59206706", "0.5912309", "0.5906452", "0.59002227", "0.5899369", "0.5870236", "0.5851388", "0.5838086", "0.58307153", "0.5824075", "0.58159196", "0.5814677", "0.58079267", "0.58058256", "0.5804543", "0.5789771", "0.5787901", "0.57804847", "0.57698584", "0.5765067", "0.5762598", "0.5757365", "0.575127", "0.57484984", "0.57430935", "0.57388026", "0.5738789", "0.5736786", "0.5706005", "0.56987256", "0.5695793", "0.5691748", "0.56892675", "0.56860894", "0.56829125", "0.56674975", "0.5666604", "0.5664284", "0.5664124", "0.5655989", "0.5646494", "0.5635425", "0.5632669", "0.56300724", "0.5628901", "0.5628164", "0.56217825" ]
0.7656717
0
Replace globs in ignore list. globs - List of file globs. [Array] Returns [Array]
def ignore!(*globs)
  @ignore.replace(globs)
  @ignore
end
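A short sketch contrasting the destructive ignore! with the appending ignore above; the surrounding class is hypothetical and only assumes an @ignore array:

class FileRules
  def initialize
    @ignore = ['vendor/**/*']
  end

  # Bang variant: discards the existing list and replaces it wholesale.
  def ignore!(*globs)
    @ignore.replace(globs)
    @ignore
  end
end

rules = FileRules.new
rules.ignore!('tmp/**/*')   # => ["tmp/**/*"]; "vendor/**/*" is gone, not appended to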
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ignore(*globs)\n @ignore.concat(globs) unless globs.empty?\n @ignore\n end", "def process_globs globs\n result = globs.flat_map do |glob|\n Dir[File.join directory, glob]\n .map{ |f| f.gsub(/\\\\/, '/') }\n .select { |f| File.file?(f) }\n end\n result\n end", "def process_exclusions(globs); end", "def amass(include_globs, exclude_globs=[], ignore=[])\n include_globs = include_globs.map{ |f| File.directory?(f) ? File.join(f, '**/*') : f } # Recursive!\n exclude_globs = exclude_globs.map{ |f| File.directory?(f) ? File.join(f, '**/*') : f } # Recursive!\n include_files = include_globs.flatten.map{ |g| Dir.glob(g) }.flatten.uniq\n exclude_files = exclude_globs.flatten.map{ |g| Dir.glob(g) }.flatten.uniq\n files = include_files - exclude_files\n files = files.reject{ |f| ignore.any?{ |x| File.fnmatch?(x, File.basename(f)) } }\n files\n end", "def ignored\n [\n '.agignore',\n '.cvsignore',\n '.gitignore',\n '.hgignore',\n ].map do |file_with_ignore_patterns|\n if File.exist? file_with_ignore_patterns\n patterns = File.read(file_with_ignore_patterns).split(\"\\n\")\n patterns.map do |pattern|\n next if pattern =~ /^#/\n next if pattern =~ /^\\s*$/\n \"-not \\\\( -path \\\"*#{pattern}*\\\" -prune \\\\)\"\n end.compact.join(' ')\n else\n ''\n end\n end.join(' ') + [\n \"-not \\\\( -path \\\"*\\\\.git*\\\" -prune \\\\)\"\n ].join(' ')\nend", "def process_globs(globs); end", "def ignore(*globs)\n @watchlist.ignore(globs) unless globs.empty?\n end", "def ignore!(*globs)\n @watchlist.ignore!(globs)\n end", "def appendGlobs(arr, patterns); patterns.each { |p| arr << Dir.glob(p) }; arr.flatten end", "def hg_ignore_files( *pathnames )\n\t\t\tpatterns = pathnames.flatten.collect do |path|\n\t\t\t\t'^' + Regexp.escape(path) + '$'\n\t\t\tend\n\t\t\ttrace \"Ignoring %d files.\" % [ pathnames.length ]\n\n\t\t\tIGNORE_FILE.open( File::CREAT|File::WRONLY|File::APPEND, 0644 ) do |fh|\n\t\t\t\tfh.puts( patterns )\n\t\t\tend\n\t\tend", "def ignore_files=(patterns)\n @finder.add_patterns(patterns)\n end", "def ignored_files\n all_files.select { |f| ignore_matcher.matched?(f) }\n end", "def glob(*patterns)\n require \"pathname\"\n patterns.reduce([]) do |result, pattern|\n if pattern.end_with?(\"/**\")\n pattern += \"/\"\n end\n result += Dir.glob(pattern).map do |path|\n Pathname.new(path.gsub(\"\\\\\", \"/\")).cleanpath.to_s\n end\n end.sort\n end", "def safe_glob(dir, patterns, flags = T.unsafe(nil)); end", "def ignore(*paths)\n paths.each do |path|\n if path.is_a? Array\n config.ignore.concat path\n else\n config.ignore << path\n end\n end \n end", "def git_ignore(*paths)\n paths.each {|path| append_to_file '.gitignore', \"#{path}\\n\"}\n end", "def ignores(*file_paths)\n self.ignore_paths += file_paths\n end", "def no_extension_files(base_dir, wildcard, non_exts = [])\n list = []\n unless non_exts.empty?\n list = Dir.glob(File.join(base_dir, wildcard, \"{#{non_exts.join(',')}}\"))\n end\n list\n end", "def process_exclusions globs\n remainder = globs.select do |glob|\n if glob_is_directory?(glob)\n exdir = File.join(directory, glob_to_directory(glob))\n included.delete_if { |file| file.start_with?(exdir) }\n false\n else\n true\n end\n end\n process_globs remainder\n end", "def glob(*globs)\n files.find_all do |path|\n globs.any? { |pattern| File.fnmatch(pattern, path, File::FNM_EXTGLOB) }\n end\n end", "def ignore(*patterns)\n @ignore_patterns += patterns\n end", "def ignore_files_with_regexps(files, regexps)\n files.select { |f| regexps.all? { |r| r.match(f.file).nil? 
} }\n end", "def glob(path, options = {})\n path = File.expand_path(path)\n if File.file?(path) || options[:no_recurse] == true\n [path]\n else\n [path] + Dir.glob(\"#{path}/**/*\", File::FNM_DOTMATCH).reject{|f| IMPLIED_DIR_ENTRIES =~ f }\n end\n end", "def ignore_includes\n ret = []\n ret << \"_includes/jekyll/**/*\"\n ret << [\"_includes/styleguide\", \"_includes/styleguide/**/*\"]\n ret << \"_includes/*.html\"\n ret << [\"_includes/atoms/figure\", \"_includes/atoms/figure/*\"]\n ret << [\"_includes/atoms/sanitize.html\", \"_includes/atoms/imagetitle.html\", \"_includes/atoms/classname.html\"]\n ret\n end", "def apply_exclusions paths\n paths.reject do |path|\n excludes.any? do |pattern|\n globs = pattern.split(\"\\\\\")\n components = path.split(\"/\")\n\n # Inno Setup includes a feature in which you can anchor excludes at\n # the root by starting the exclude with a \"\\\". Since I don't want\n # to make this more complicated than I have to, I'm not implementing\n # this feature at this time.\n if globs[0] == \"\"\n raise \"Can't handle anchored exclude #{pattern}\"\n end\n \n globs_match_strings_anywhere? globs, components\n end\n end\n end", "def svn_ignore_files( *pathnames )\n\tpathnames.flatten!\n\n\tmap = pathnames.inject({}) do |map,path|\n\t\tmap[ path.dirname ] ||= []\n\t\tmap[ path.dirname ] << path.basename\n\t\tmap\n\tend\n\n\ttrace \"Ignoring %d files in %d directories.\" % [ pathnames.length, map.length ]\n\n\tmap.each do |dir, files|\n\t\ttrace \" %s: %p\" % [ dir, files ]\n\t\tio = open( '|-' ) or exec 'svn', 'pg', 'svn:ignore', dir\n\t\tignorelist = io.read.strip\n\t\tignorelist << \"\\n\" << files.join(\"\\n\")\n\t\tsystem 'svn', 'ps', 'svn:ignore', ignorelist, dir\n\tend\nend", "def select_default_ignore_patterns\n\t@exclude_patterns = DEFAULT_IGNORE_PATTERNS.dup\n end", "def files() = files_path.glob('**/*')", "def dont_reload(*glob)\n Dir[*glob].each { |path| Watcher::List.for(self).ignore(path) }\n end", "def file_list(pattern)\n FileList[pattern].tap do |list|\n list.exclude 'vendor/**/*', # bundler\n 'pkg/**/*', # gem build process\n 'spec/fixtures/**/*' # puppetlabs fixtures\n list.reject! { |f| File.directory? f }\n end\n end", "def excludes(*paths)\n self.excluded_files.concat(expand_globs(paths))\n end", "def files(folder, ignore)\n all = Dir.glob(folder + \"/**/*\")\n ignore.each {|i| all = all - Dir.glob(i)}\n \n all\n end", "def excluded_files() = []", "def select_default_ignore_patterns\n @exclude_patterns = DEFAULT_IGNORE_PATTERNS.dup\n end", "def glob_changed(*globs)\n # FIXME: Implement properly once changed detection is available.\n glob(*globs)\n end", "def glob(*patterns)\n selection = []\n patterns.each do |pattern|\n selection.concat(Dir.glob(File.join(path, pattern))) \n end\n selection\n end", "def ignore_assets\n ret = []\n ret << [\"assets/styles/atoms/flex\", \"assets/styles/atoms/flex/*\"]\n ret << [\"assets/styles/styleguide\", \"assets/styles/styleguide/**/*\"]\n ret << \"assets/styles/**/*.liquid\"\n ret << \"assets/styles/**/*.css\"\n ret << \"assets/styles/*.scss\"\n ret\n end", "def clean_paths\n cached_used = used_files\n glob_options = File::FNM_DOTMATCH | File::FNM_CASEFOLD\n files = Pathname.glob(root + \"**/*\", glob_options).map(&:to_s)\n\n files.reject! do |candidate|\n candidate = candidate.downcase\n candidate.end_with?('.', '..') || cached_used.any? 
do |path|\n path = path.downcase\n path.include?(candidate) || candidate.include?(path)\n end\n end\n files\n end", "def glob(pattern, flags = T.unsafe(nil)); end", "def matches_for_glob(glob) # TODO: rename?\n glob = File.join(self.lib_dirs_glob, glob)\n\n Dir[glob].map {|f| f.tap(&Gem::UNTAINT) } # FIX our tests are broken, run w/ SAFE=1\n end", "def file_glob\n if file_types.nil?\n '*'\n else\n \"*{#{file_types.join(',')}}\"\n end\n end", "def ignore(*paths)\n @to_copy -= paths.flatten\n end", "def generate_glob_filter(glob)\n # Negative glob starts with '!'\n negative = glob.start_with?('!')\n # Strip leading '!' then\n glob.remove_prefix!('!') if negative\n lambda do |path|\n matches = File.fnmatch(glob, path, GLOB_MATCH_MODE)\n # inverse match if glob is negative\n matches = !matches if negative\n matches\n end\n end", "def clear_ignore_patterns\n\t@exclude_patterns = [ /^$/ ]\n end", "def ignored_files=(_arg0); end", "def glob(*args)\n Glob.new(self, *args)\n end", "def glob(*args)\n Glob.new(self, *args)\n end", "def excludes\n return Dir.chdir( base ) {\n %w(. .. .svn .git .hg CVS Rakefile) + files_for_erb +\n Dir.glob( '*~' ) + Dir.glob( '#*#' ) + Dir.glob( '*.bak' )\n }\n end", "def glob(pattern, &block)\n result = @lock.with_read_lock { @paths.flat_map { |path| path.glob(pattern, &block) }}\n result unless block\n end", "def ignore_paths\n Dir.glob(\"**/*\").select { |f| File.directory? f }\n .collect { |name| \"#{name}/\" }\n - [\"app/\",\n \"app/views/\",\n \"app/views/branded/\",\n \"app/views/branded/public_pages/\",\n \"app/views/branded/home/\",\n \"app/views/branded/contact_us/\",\n \"app/views/branded/contact_us/contacts/\",\n \"app/views/branded/shared/\",\n \"app/views/branded/layouts/\",\n \"app/views/branded/static_pages/\"]\nend", "def prepare_exception_globs(exceptions_file, source_dir)\n source_path = File.expand_path(source_dir)\n globs = []\n File.open(exceptions_file) do |file|\n file.each do |line|\n glob = line.strip\n if (glob.length > 0) && (glob[0..0] != '#')\n globs << \"#{source_path}/#{glob}\".gsub('//', '/')\n end\n end\n end\n return globs\n end", "def clear_ignore_patterns\n @exclude_patterns = [ /^$/ ]\n end", "def glob\n \"**/*\"\n end", "def files\n @files ||= Dir.glob(File.join(@root, '**/*'), GLOB_FLAGS).select do |path|\n File.file?(path) && path !~ IGNORE_REGEX\n end\n end", "def glob_match (filenames, pattern)\n\t# Escape the '*', '?', and '.' characters\n\tpattern.gsub!(/[\\*\\?\\.]/, '*' => '.*', '?' => '.', '.' => '\\.') \t\n\tregex = Regexp.new(pattern)\n\t#select returns a new array\n\tfilenames.select do |filename|\n\t\tfilename =~ regex\n\tend\nend", "def glob; end", "def glob(*args, &block); end", "def ignore_paths(options)\n source = options['source']\n destination = options['destination']\n config_files = Configuration[options].config_files(options)\n paths = config_files + Array(destination)\n ignored = []\n\n source_abs = Pathname.new(source).expand_path\n paths.each do |p|\n path_abs = Pathname.new(p).expand_path\n begin\n rel_path = path_abs.relative_path_from(source_abs).to_s\n ignored << Regexp.new(Regexp.escape(rel_path)) unless rel_path.start_with?('../')\n rescue ArgumentError\n # Could not find a relative path\n end\n end\n ignored\n end", "def glob_match(filenames, pattern)\n\t\n\tnewPattern = pattern.gsub( '*', '.*').gsub( '?', '.')\n\n\treturn filenames.select{|i| i.match(/#{newPattern}/)}\n\t\nend", "def regexify(glob)\n glob.gsub! 
'.', '\\\\.'\n rx = glob.split '*'\n rs = '^' + rx[0]\n rs << '.*'\n rs << rx[-1] if rx.length == 2\n rs << '$'\n Regexp.new rs\nend", "def glob(*args, &block)\n @entry_set.glob(*args, &block)\n end", "def ignoring\n %w{*_test.lua *_spec.lua .*}\n end", "def filter(files)\n ruleset.watchlist.filter(files)\n #case ruleset.ignore\n #when Ignore\n # ruleset.ignore.filter(list)\n #when Array\n # list.reject!{ |path| ignore.any?{ |ig| /^#{ig}/ =~ path } }\n #else\n # list\n #end\n end", "def ignore=(ignored_array)\n @ignore = (@ignore + ignored_array).uniq\n end", "def glob\n case @raw_image_files.first.filename\n when /^E.*dcm$/\n return 'E*.dcm'\n when /\\.dcm$/\n return '*.dcm'\n when /^I\\./\n return 'I.*'\n when /^I/\n return 'I*.dcm'\n when /.*\\.\\d{3,4}/\n return '*.[0-9]*'\n when /\\.0/\n return '*.0*'\n else\n return nil\n end\n # Note - To exclude just yaml files we could also just use the bash glob\n # '!(*.yaml), but we would have to list all exclusions. This may turn\n # out easier in the long run.\n end", "def glob pattern\n Dir[File.join(@originals,pattern)].collect do |f|\n File.basename(f)\n end\n end", "def tracked_files\n all_files.reject { |f| ignore_matcher.matched?(f) }\n end", "def except(files,patterns)\n if !patterns.kind_of?(Array)\n patterns = [patterns]\n end\n files.select do |file|\n matches = true\n patterns.each do |pattern|\n if File.fnmatch(pattern,file)\n matches = false\n break\n end\n end\n matches\n end\n end", "def ignore_only=(names)\n @ignore = [names].flatten\n end", "def ignore(regexps)\n @options[:ignore] = [options[:ignore], regexps]\n registry[:silencer] = Silencer.new(self)\n end", "def exclude_ignored_paths(dirs, ignore_paths = self.ignore_paths)\n Dir.glob(dirs.map { |d| \"#{d.sub(%r{/+$}, '')}/*\" }, File::FNM_DOTMATCH).reject do |path|\n ignore_paths.include?(File.basename(path))\n end\n end", "def file_list(path, ext = 'scss', remover = '')\n # Necessary so that directories aren't counted\n collect_path = path.include?(\"*\") ? path : \"#{path}/**/*.#{ext}\"\n # Remove first slash from path if present. probably a better way to do this.\n Dir[collect_path].collect { |file| file.gsub(remover, '').gsub(/^\\/app\\//, 'app/') if File.file?(file) }\n end", "def files\n Dir.glob(\"#{path}/*.mp3\").collect do\n |file| file.gsub(\"#{path}/\",\"\")\n end\n end", "def all_files_except_git\n Dir.glob('*', File::FNM_DOTMATCH).delete_if { |file| file =~ /\\A\\.{1,2}\\z|\\A\\.git\\z/ }\n end", "def to_glob(glob)\n return Fileset.new if glob.nil?\n return Fileset.new(glob.map { |g| to_glob(g) }) if glob.is_a? Array\n return Fileset.from_glob(@workdir + glob) if @workdir\n Fileset.from_glob(glob)\n end", "def exclude(*files)\n files = to_artifacts(files)\n @excludes |= files\n @excludes |= files.reject { |f| f =~ /\\*$/ }.map { |f| \"#{f}/*\" }\n self\n end", "def exclude(*files)\n @exclude += files.flatten\n self\n end", "def ovl_glob(rel_pattern)\n gem_files = Dir.glob(File.join(GEM_ROOT, rel_pattern)).map do |path|\n path.sub(GEM_ROOT+\"/\", \"\")\n end\n\n (gem_files + Dir.glob(rel_pattern)).uniq\n end", "def ovl_glob(rel_pattern)\n gem_files = Dir.glob(File.join(GEM_ROOT, rel_pattern)).map do |path|\n path.sub(GEM_ROOT+\"/\", \"\")\n end\n\n (gem_files + Dir.glob(rel_pattern)).uniq\n end", "def ignore!(*regexps)\n directories_records.each { |r| r.ignore!(*regexps) }\n self\n end", "def ignore(*regexps)\n directories_records.each { |r| r.ignore(*regexps) }\n self\n end", "def glob(match)\n paths = Array.new(match) # Force array-ness\n\n paths.map! 
do |spec|\n if spec.include?('*')\n files.select do |file, _|\n # Dir#glob like source matching\n File.fnmatch?(spec, file, File::FNM_PATHNAME | File::FNM_DOTMATCH)\n end.sort\n else\n [spec, files[spec]]\n end\n end\n\n Hash[*paths.flatten]\n end", "def declare_chefignore_patterns\n @flavor.class.do_declare_resources do\n if snippet?('standard_ignore')\n %w(\n Gemfile Gemfile.lock Rakefile Berksfile Berksfile.lock\n ).each do |e|\n chefignore_patterns << e\n end\n end\n end\n end", "def unglobify(glob)\n chars = glob.split(\"\")\n\n chars = smoosh(chars)\n\n curlies = 0\n escaping = false\n string = chars.map do |char|\n if escaping\n escaping = false\n char\n else\n case char\n when \"**\"\n \"([^/]+/)*\"\n when '*'\n \".*\"\n when \"?\"\n \".\"\n when \".\"\n \"\\.\"\n\n when \"{\"\n curlies += 1\n \"(\"\n when \"}\"\n if curlies > 0\n curlies -= 1\n \")\"\n else\n char\n end\n when \",\"\n if curlies > 0\n \"|\"\n else\n char\n end\n when \"\\\\\"\n escaping = true\n \"\\\\\"\n else\n char\n end\n end\n end\n\n '(\\A|\\/)' + string.join + '\\Z'\n end", "def glob(pattern, *args)\n Dir.glob(pattern, *args).sort\n end", "def file_patterns\n [@file_patterns].flatten.compact.uniq\n end", "def glob(*pat, &blk)\n regexes = pat.flatten.map {|pat| Bun.convert_glob(pat) }\n enum = self.class.new(@collection) do |yielder|\n self.each do |fname|\n # TODO Refactor with any?\n matched = false\n regexes.each do |regex|\n if fname =~ regex\n matched = true\n break\n end\n end\n yielder << fname if matched\n end\n end\n if block_given?\n enum.each(&blk)\n else\n enum\n end\n end", "def track_files(glob); end", "def clean_file_list( args )\n\t\tdirtyfiles = args\n\n\t\t# only allow .mp3 files into the clean list.\n\t\tfiles = Array.new\n\t\tdirtyfiles.each { |x|\n\t\t\tif ( x =~ /.*\\.[mM][pP]3$/ )\n\t\t\t\tfiles.push( x )\n\t\t\telse\n\t\t\t\tputs \"\\tWARNING: No .mp3 suffix in \\\"#{x}\\\"! *** skipping! ***\"\n\t\t\tend\n\t\t}\n\n\t\tfiles\n\tend", "def files_array(files)\n return [] unless files\n files.is_a?(Array) ? files : pattern_to_filelist(files.to_s)\n end", "def load(*globs)\n skips = globs.grep(/^-/)\n (globs - skips).each do |glob|\n glob += '.rb' if glob =~ /\\*$/\n Dir.glob(glob).sort.each do |path|\n next unless File.file? path\n next if skips.find {|pat| path =~ /#{pat[1..-1]}$/}\n instance_eval(File.read(path), path)\n end\n end\n end", "def filter_matched_files\n matched_files = []\n\n unless file_extensions.empty?\n extensions = file_extensions.reduce do |total, extension|\n total + \"|\" + extension.downcase\n end\n extensions_regex = \"^(.+\" + extensions + \")$\"\n (git.modified_files + git.added_files).each do |file|\n matched_files += [file] unless file.downcase.match(extensions_regex).nil?\n end\n end\n\n unless file_patterns.empty?\n (git.modified_files + git.added_files).each do |line|\n file_patterns.each do |pattern|\n matched_files += [line] unless line.downcase.match(pattern.downcase).nil?\n end\n end\n end\n\n return [matched_files].flatten.compact\n end", "def expanded_paths(patterns, options = {})\n return [] if patterns.empty?\n path_list.glob(patterns, options).flatten.compact.uniq\n end", "def getListSortWhitoutIgnoredFiles(listPath)\n newListPath = listPath.clone()\n if (!getIgnoreFile().nil?)\n getIgnoreFile().each do | ignoreFile |\n listPath.each do | path |\n if !ignoreFile.nil? 
and path.match(ignoreFile + \"$\")\n newListPath.delete(path)\n end\n end\n end\n end\n return newListPath\n end", "def ignored_names(ignored)\n modules = Config.read(\"modules\") || []\n ignored.map { |i| modules[i] || \"\" }\n end", "def declare_chefignore_patterns\n @flavor.class.do_declare_resources do\n chefignore_patterns << 'Guardfile' if snippet?('standard_ignore')\n end\n end", "def all_files() = path.glob('**/*').select(&:file?).map(&:to_s)", "def glob=(_arg0); end", "def file(glob)\n \"**/#{glob}\"\n end", "def files\n # list_of_filenames = Dir.entries(path)\n @list_of_filenames = Dir.glob(\"#{@path}/*.mp3\").collect! {|x| x.gsub(\"#{@path}/\", \"\") }\n # binding.pry\n end" ]
[ "0.78354275", "0.72956806", "0.7157316", "0.6982517", "0.69178647", "0.6886567", "0.68079126", "0.6795027", "0.6770556", "0.6757173", "0.67300713", "0.6569321", "0.6523855", "0.64728034", "0.6457298", "0.64252776", "0.6421378", "0.64094037", "0.6372147", "0.6358228", "0.6331173", "0.6317407", "0.6288957", "0.6284469", "0.6252762", "0.62332976", "0.6192034", "0.61796004", "0.61768746", "0.6166685", "0.61580503", "0.61471117", "0.6145415", "0.614112", "0.6133109", "0.61069095", "0.6104918", "0.60725135", "0.605942", "0.6021657", "0.6016174", "0.60145974", "0.59790266", "0.5973352", "0.59679794", "0.5942701", "0.5942701", "0.5942137", "0.5919331", "0.59034884", "0.5901759", "0.588902", "0.5887581", "0.5887413", "0.5855373", "0.58490384", "0.58320343", "0.5798389", "0.57864076", "0.5783226", "0.5778111", "0.5767294", "0.5766028", "0.5752676", "0.57412064", "0.5729476", "0.5727368", "0.5716075", "0.5697188", "0.5683753", "0.5680802", "0.5675039", "0.56710875", "0.56605303", "0.5659847", "0.56442153", "0.5636143", "0.5633443", "0.5633443", "0.5631055", "0.56282485", "0.56275785", "0.56242025", "0.5618709", "0.56182444", "0.56146544", "0.5609463", "0.56074494", "0.55919194", "0.55911446", "0.5575105", "0.5568976", "0.5567653", "0.5558097", "0.5551906", "0.55404496", "0.55267274", "0.55261207", "0.550794", "0.55051744" ]
0.8189704
0
Home directory. Returns [String]
def home
  @home ||= File.expand_path('~')
end
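For illustration, a minimal sketch of the memoized home reader above inside a hypothetical Env class; note that File.expand_path('~') resolves via the HOME environment variable and raises ArgumentError when it is unset:

class Env
  # Computes and caches the expanded home path on first call.
  def home
    @home ||= File.expand_path('~')
  end
end

env = Env.new
env.home                    # e.g. "/home/alice"
env.home.equal?(env.home)   # => true, the cached String object is reused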
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def home_path\n File.expand_path(\"~\")\n end", "def home_basedir\n if platform_family?('mac_os_x')\n '/Users'\n elsif platform_family?('solaris2')\n '/export/home'\n else\n '/home'\n end\n end", "def home(user=nil)\n File.expand_path(Dir.home(user))\n end", "def home_path\n result = tmp_path.join(\"home\")\n FileUtils.mkdir_p(result)\n result\n end", "def home_dir(name=nil)\n username = (name || self.username)\n if (username == \"root\")\n \"/root\"\n else\n \"/home/#{username}\"\n end\n end", "def home\n ENV['HOME'] || File.expand_path('~')\n end", "def home_path\n @ssh_home\n end", "def server_home\n FilePath.new(@build_dir, \"homes\", \"server\").ensure_directory\n end", "def user_directory\n File.expand_path('~')\n end", "def home\n env.home\n end", "def get_homedir(user)\n begin\n Etc.getpwnam(\"#{user}\")[\"dir\"].chomp\n # or with dscl\n #homedir = %x(dscl . -read /users/#{user} NFSHomeDirectory).gsub(/NFSHomeDirectory: /,\"\")\n rescue Exception => e\n end \n end", "def home\n @home ||= ENV.fetch(\"HOME\").tr(\"\\\\\", \"/\")\nend", "def home\n Pathname.new(node.chopped.nginx.nginx_home)\n end", "def default_or_home_directory\n hd = TyrantManager.home_dir\n begin\n hd = TyrantManager.default_directory\n rescue => e\n # yup, using home\n end\n return hd\n end", "def home(subcomponent = \"\")\n File.expand_path(\"#{@basepath}/#{subcomponent}\")\n end", "def home_file(*path)\n File.join(ENV['HOME'], *path)\nend", "def home_file(*path)\n File.join(ENV['HOME'], *path)\nend", "def abspath path\n if path[0] != '/'\n @homedir + '/' + path\n else\n path\n end\n end", "def git_root_till_home\n Pathname.new(Dir.pwd).ascend do |dir_pathname|\n return dir_pathname if File.directory?(\"#{dir_pathname}/.git\")\n return nil if dir_pathname.to_s == home_dir\n end\n end", "def user_install_dir\n eval_path(find_val('user_install_dir') || home_dir)\n end", "def infer_homedir(username)\n\t# mitigate blank username rm -rf /network/home/\n\tif username.blank?\n\t\traise(\"Username cannot be empty!\")\n\t# mitigate path traversal rm -rf /network/home/../../etc/shadow\n\telsif username !~ /\\A[\\w\\.-]+\\z/\n\t\traise(\"Invalid format of username\")\n\tend\n\n\tFile.join($home_base, username)\nend", "def detect_home(set_if_missing=false)\n if(ENV['HOME'] && Pathname.new(ENV['HOME']).absolute?)\n ENV['HOME']\n else\n home = File.directory?('/root') && File.writable?('/root') ? '/root' : '/tmp'\n if(set_if_missing)\n ENV['HOME'] = home\n end\n home\n end\n end", "def system_home\n ENV.fetch(\"HOME\").tr(\"\\\\\", \"/\")\nend", "def solr_home\n File.join(configuration.solr_home)\n end", "def solr_home\n File.join(configuration.solr_home)\n end", "def default_path\n Gem.default_path + [@home]\n end", "def solr_home\n File.join(FileUtils.pwd, 'solr')\n end", "def homedir(user = (@sudo||@user))\n exec! 
\"awk -F: -v v=#{escape(user)} '{if ($1==v) print $6}' /etc/passwd\"\n end", "def home\n @home ||= \"#{site.local_url}\"\n end", "def tools_home\n FilePath.new(@build_dir, \"homes\", \"tools\").ensure_directory\n end", "def get_home_path(dependency)\n dep_home = ENV[\"#{dependency.upcase}_HOME\"] ||\n @config.xpath(\"deps/dep[@name = '#{dependency}']/home/text()\").first\n raise %Q{Please specify the home directory for #{dependency} by using\n <dep name=\\\"#{dependency}\\\">\n <home>#{File.join(\"path\", \"to\", dependency, \"home\")}</home>\n </dep>} unless dep_home\n missing_error = \"Directory #{dep_home}, to which #{dependency.capitalize} home is set, doesn't exist.\"\n raise missing_error unless File.directory? dep_home\n dep_home\n end", "def home(username)\n send_request(FXP_EXTENDED, :string, \"home-directory\", :string, username)\n end", "def workspace_folder\n @pwd\n end", "def dir\n ENV['XDG_CONFIG_HOME'] || File.join(home, '.config')\n end", "def home_path_for(sling)\n if ( @path == nil )\n props = sling.get_node_props(user_url)\n @path = props[\"path\"]\n end\n return \"/_user\"+@path\n end", "def app_dir\n File.join(home_dir, \"#{sanitized_app_name}.#{name}\")\n end", "def home_path\n \"/\"\n end", "def solr_home\n @solr_home ||=\n if user_configuration_from_key('solr', 'solr_home')\n user_configuration_from_key('solr', 'solr_home')\n else\n File.join(::Rails.root, 'solr')\n end\n end", "def root\n Dir.pwd\n end", "def prompt_dir\n Dir.pwd!=`echo $HOME`.chomp ? \"[#{Dir.pwd.split(\"/\")[-1]}]\".blue : \"[~]\".blue\nend", "def home_path\n @heroku_home\n end", "def homify f = ''\n if f.length == 0 then @homedir\n else \"#{@homedir}/#{f}\" end\n end", "def base_path\n Dir.pwd + \"/\"\n end", "def getWorkingDir\n if(@workingDir != nil)\n return @workingDir\n end\n currDir = Dir.pwd\n dr = \"\"\n currDir.split(\"/\").each{ |entry|\n dr = dr+entry+\"/\"\n #puts dr\n if(File.directory? dr+\".hoster\")\n @workingDir = dr+\".hoster\"\n end\n }\n @workingDir\n end", "def modules_home\n FilePath.new(@build_dir, \"modules\").ensure_directory\n end", "def getWorkingDir\n currDir = Dir.pwd\n dr = \"\"\n currDir.split(\"/\").each{ |entry|\n dr = dr+entry+\"/\"\n #puts dr\n if(File.directory? dr+\".hoster\")\n @workingDir = dr+\".hoster\"\n end\n }\n @workingDir\n end", "def root( *args )\n if self.home =~ /^[a-zA-Z]:/\n self.home.to_s[0..3].fwf_filepath.join( *args )\n else\n \"/\".fwf_filepath.join( *args )\n end\n end", "def tildize (path)\n\tpath.gsub(`echo $HOME`.chomp, \"~\")\nend", "def cwd\n return cd(\"\").to_s\n end", "def home_path?(path)\n path[0] == ?~\n end", "def getwd\n Dir.getwd\n end", "def local_yay_path\n\t\t\traise \"ENV[HOME] not found!\" unless ENV['HOME']\n\t\t\treturn \"#{ENV['HOME']}/.yay\"\n\t\tend", "def __detect_project_home__(path, li = '.lithium')\n path = File.expand_path(path)\n prev_path = nil\n while path && prev_path != path\n marker = File.join(path, li)\n break if File.directory?(marker) && File.exists?(marker)\n prev_path = path\n path = File.dirname(path)\n end\n return path != prev_path ? path : nil\nend", "def sys_root\n '/'\n end", "def working_dir\n return nil if !repo || !user\n return \"#{Bini.data_dir}/repos/#{user}/#{repo}\"\n end", "def append_to_home_if_not_absolute( p )\n path = Pathname.new( p )\n unless path.absolute? 
then\n path = Pathname.new( home_dir ) + path\n end\n return path.to_s\n end", "def current_directory\n File.expand_path @current_directory\n end", "def root\n File.dirname __dir__\n end", "def root\n File.dirname __dir__\n end", "def dir\n @working_directory\n end", "def root\n Pathname.new(File.dirname(__dir__))\n end", "def run_java_home\n `/usr/libexec/java_home`.to_s.strip\n end", "def root_dir\n is_rails? ? Rails.root.to_s : Dir.pwd.to_s\n end", "def pwd\r\n ndev.rpc.command(\"show cli directory\").text.strip\r\n end", "def get_root_directory\n return @@root_directory\n end", "def get_root_directory\n return @@root_directory\n end", "def working_dir\n ENV['PWD'] || Dir.pwd\n end", "def root\n File.dirname __dir__\n end", "def a_dir\n self.path.split('/')[0...-1]\n end", "def win32_system_dir #:nodoc:\n win32_shared_path = ENV[\"HOME\"]\n if win32_shared_path.nil? && ENV[\"HOMEDRIVE\"] && ENV[\"HOMEPATH\"]\n win32_shared_path = ENV[\"HOMEDRIVE\"] + ENV[\"HOMEPATH\"]\n end\n\n win32_shared_path ||= ENV[\"APPDATA\"]\n win32_shared_path ||= ENV[\"USERPROFILE\"]\n raise Win32HomeError,\n \"Unable to determine home path environment variable.\" if\n win32_shared_path.nil? or win32_shared_path.empty?\n normalize(File.join(win32_shared_path, \"Rake\"))\n end", "def applicationFilesDirectory\n file_manager = NSFileManager.defaultManager\n library_url = file_manager.URLsForDirectory(NSLibraryDirectory, inDomains:NSUserDomainMask).lastObject\n library_url.URLByAppendingPathComponent(\"Homebrew\")\n end", "def target_dir\n return @target_dir ? @target_dir : Dir.home\n end", "def root\n find_single_directory || @app_dir\n end", "def _vim_user_dir\n platform_dir = { :UNIX => \"/.vim\", :WINDOWS => \"/vimfiles\" }\n home_dir = Env.determine_home_dir\n user_dir = home_dir + platform_dir[Env.determine_target_os] if home_dir\n return user_dir\n end", "def sourcehome\n site ? 
site.home : '#'\n end", "def root; Pathname(__dir__).parent; end", "def current_dir; end", "def default_directory\n defaults = [ self.cwd_default_directory,\n self.env_default_directory,\n self.localstate_default_directory ]\n dd = nil\n loop do\n dd = defaults.shift\n break if dd or defaults.empty?\n end\n raise Error, \"No default Tyrant Manager home directory found\" unless dd\n return dd\n end", "def home_url(path = '', scheme = nil)\n get_home_url(nil, path, scheme )\n end", "def make_home_directory( username, skeldir=SKELDIR )\n\t\tself.log.info \"Making home directory for %p, cloned from %s\" % [ username, skeldir ]\n\t\thomedir = HOMEDIR_BASE + username\n\t\traise \"%s: already exists\" % [ homedir ] if homedir.exist?\n\t\traise \"%s: already has an archived homedir\" % [ username ] if\n\t\t\t( ARCHIVE_BASE + username ).exist?\n\n\t\tFileUtils.cp_r( skeldir.to_s, homedir )\n\t\tFileUtils.chown_R( username, nil, homedir )\n\n\t\treturn homedir.to_s\n\tend", "def root_path\n @root_path ||= `git rev-parse --show-toplevel`.chomp\n end", "def app_dir_pathname\n @app_dir_pathname ||= Pathname.new(app_name)\n end", "def directory\n File.dirname(@path) + '/'\n end", "def path\n Pathname.new(\n File.expand_path(\n File.join(Gem.user_home, \".bowline\")\n )\n )\n end", "def prefixed_working_directory\n return self.storage.prefixed_working_directory\n end", "def lookup_root\n pwd = File.expand_path(Dir.pwd)\n home = File.expand_path('~')\n while pwd != '/' && pwd != home\n return pwd if ROOT_INDICATORS.any?{ |r| File.exist?(File.join(pwd, r)) }\n pwd = File.dirname(pwd)\n end\n return nil\n end", "def work_dir; end", "def etc; HOMEBREW_PREFIX+'etc' end", "def untildize (path)\n\tpath.gsub(\"~\", `echo $HOME`.chomp)\nend", "def directory\n File.dirname @path\n end", "def keys_file_path\n return File.join(Dir.home, keys_file_path_from_home)\n end", "def repo_path\n # root dir is system home folder, need to exist prior to app launch\n # /home\n root_dir = Pathname(Settings.application.root_dir)\n\n # base dir is aq_git user home folder, need to exist prior to app launch\n # /home/aq_git\n base_dir = root_dir + Settings.application.repo_user\n\n if self.kind == \"git\"\n repo_path = Settings.application.repo_git_path\n end\n\n # git_dir is where the repositories are gonna be stored, creating if needed\n # /home/aq_git/git or /home/aq_git/hg\n scm_dir = base_dir + repo_path\n scm_dir.mkdir if base_dir.exist? && !scm_dir.exist?\n\n # repo dir is the repository own path\n # /home/aq_git/git/username\n if self.owner\n repo_dir = scm_dir + self.owner.login\n elsif current_user\n repo_dir = scm_dir + current_user.login\n end\n repo_dir.mkdir if !repo_dir.exist?\n\n # the dot dir is the .git (or .hg) located in the repository\n # /home/aq_git/git/username/reposit.git\n if self.is_git?\n dot_dir = repo_dir + (self.name + \".#{self.kind}\")\n dot_dir.mkdir if !dot_dir.exist?\n end\n\n self.path = dot_dir.to_s\n end", "def seven_zip_home\n \"#{windows_path(Chef::Config[:file_cache_path])}\\\\seven_zip_#{node['poise-archive']['seven_zip']['version']}\"\n end", "def home_path_for(sling)\n if ( @path == nil )\n props = sling.get_node_props(group_url)\n @path = props[\"path\"]\n end\n return \"/_group\"+@path\n end", "def win32_cache_dir\r\n unless ENV['HOMEDRIVE'] && ENV['HOMEPATH'] && File.exists?(home = ENV['HOMEDRIVE'] + ENV['HOMEPATH'])\r\n puts \"No HOMEDRIVE or HOMEPATH environment variable. 
Set one to save a\" +\r\n \"local cache of stylesheets for syntax highlighting and more.\"\r\n return false\r\n else\r\n return File.join(home, '.slideshow')\r\n end\r\n end", "def confdir\n cli_confdir = cli[:confdir]\n global = global_config_dir\n if !cli_confdir.nil?\n return cli_confdir\n elsif File.directory?(global)\n return global\n else\n return user_config_dir\n end\n end", "def siteroot\n \"/var/www/gforge-projects\"\n end", "def workdir\n result = base.join('cache')\n result.mkpath\n result\n end", "def working_directory\n @options[:working_directory]\n end", "def rails_root\n `pwd`.gsub(\"\\n\", \"\")\n end" ]
[ "0.8425394", "0.8269689", "0.8140095", "0.8131916", "0.81192297", "0.80107266", "0.7614194", "0.7598574", "0.750214", "0.74339074", "0.7376708", "0.72619176", "0.72573215", "0.72062606", "0.71135294", "0.7016243", "0.7016243", "0.7014035", "0.69793147", "0.69261646", "0.68919116", "0.68835855", "0.683315", "0.6822549", "0.6822549", "0.6811472", "0.67634946", "0.67624825", "0.6755499", "0.67520446", "0.66764057", "0.66680104", "0.6608532", "0.6595974", "0.6581655", "0.65574247", "0.6537972", "0.6505714", "0.650517", "0.64904", "0.6484168", "0.64841336", "0.645025", "0.64306146", "0.64226496", "0.64157903", "0.63825154", "0.6369515", "0.63297105", "0.6305469", "0.6289939", "0.6284113", "0.6281648", "0.62714744", "0.62496865", "0.6247868", "0.6247162", "0.62376976", "0.62376976", "0.6237299", "0.62357163", "0.6228448", "0.6213805", "0.6192726", "0.6187064", "0.6187064", "0.6179051", "0.6167156", "0.61631495", "0.61625177", "0.6161425", "0.6138029", "0.6132498", "0.61261505", "0.61253375", "0.6120651", "0.611426", "0.6103194", "0.6073294", "0.6060341", "0.6059943", "0.601684", "0.60156274", "0.5997434", "0.5974827", "0.5971048", "0.5969429", "0.59658456", "0.59634274", "0.5957001", "0.5942963", "0.59335375", "0.5931348", "0.5929865", "0.5928648", "0.59228253", "0.59216255", "0.5913363", "0.59018666", "0.59009343" ]
0.81937206
2
Parse out a ruleset's name from its ruleset dependencies. Returns [Array]
def parse_ruleset_name(name)
  if Hash === name
    raise ArgumentError if name.size > 1
    list = [name.values].flatten.map{ |b| b.to_sym }
    name = name.keys.first
  else
    list = []
  end
  return name.to_sym, list
end
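For readability, a short usage sketch of the `parse_ruleset_name` document above; the example ruleset names are illustrative assumptions, not part of this record:

```ruby
# A bare name (Symbol or String) carries no dependencies:
parse_ruleset_name(:lint)                    #=> [:lint, []]
# A one-entry Hash maps the ruleset name to its dependency list:
parse_ruleset_name(lint: [:setup, :parse])   #=> [:lint, [:setup, :parse]]
# More than one key is rejected:
parse_ruleset_name(a: [], b: [])             # raises ArgumentError
```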
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rule_names\n @rule_names ||= []\n end", "def rule_names\n @rule_names ||= []\n end", "def ruleset_rules\n @rulesets.collect do |r|\n [\"# Begin [#{r.name}]\",\n r.firewall_rules,\n \"# End [#{r.name}]\",\n \"\"]\n end\n end", "def dependencies(ctx)\n @i_name ||= ctx.lookup(name).i_name\n s = Set.new\n s << i_name\n s\n end", "def rule_set_artifacts\n @rule_set_artifacts ||= []\n end", "def rulelist(ruleset, ctx)\r\n\r\n outlist = \"\"\r\n\r\n ruleset.rules.each do |ralias|\r\n rname = ctx.rules[ralias].name\r\n outlist += reference(\"rule\", rname)\r\n\r\n end # rules.each\r\n\r\n return outlist\r\n\r\n end", "def rules\n return [] if @rules.empty?\n @rules.sort_by { |k| k['priority'] }.map { |h| h['rule_name'] }\n end", "def rules_by_name; end", "def list\n @driver.getRuleNames\n end", "def parse_deps(input)\n matches = input.scan(/\\s+-\\s+\\[ \\]\\s+(\\S+)\\s+(\\S+\\s+[0-9.]+)/) || []\n\n gemspecs = matches.map do |match|\n {:name => match[0], :version => match[1]}\n end\n\n # NOTE: These gemspecs are gems that are not yet loaded. We don't know if\n # they are installed or not, so we don't know for sure if the\n # dependency will be met at runtime. So, we'll execute a gem command\n # to check to see if these are installed and ignore the ones that\n # already are.\n gemspecs.delete_if do |gemspec|\n cmd = \"gem list #{gemspec[:name]} -i -l\"\n if gemspec[:version]\n cmd << \" -v '#{gemspec[:version]}'\"\n end\n `#{cmd}` =~ /true/\n end\n\n return gemspecs\n end", "def rules_by_name(rule_name, section_id = @one_section_id)\n rules = []\n return rules unless section_id\n\n all_rules = rules(section_id)\n return rules unless all_rules\n\n all_rules['results'].each do |rule|\n rules << rule if rule['display_name'] == rule_name\n end\n rules\n end", "def depends_upon(match_name) #, constraint)\n list = []\n $LEDGER.each do |name, libs|\n case libs\n when Library\n list << libs if libs.requirements.any?{ |r| match_name == r['name'] } \n else\n libs.each do |lib|\n list << lib if lib.requirements.any?{ |r| match_name == r['name'] } \n end\n end\n end\n list\n end", "def dependency_list\n @target.dependencies.map(&:display_name)\n end", "def get_dep_names(data)\n return unless data.key?(\"dependencies\")\n\n data['dependencies'].each do |name, dep_info|\n @deps[name] = {}\n get_dep_names(dep_info) if dep_info['dependencies']\n end\n end", "def get_staging_rule_sets\n get(\"#{url_base}/staging/rule_sets?#{dc}\")[\"data\"]\n end", "def depend_upon(match_name) #, constraint)\n list = []\n each do |name, libs|\n case libs\n when Library\n list << libs if libs.requirements.any?{ |r| match_name == r['name'] } \n else\n libs.each do |lib|\n list << lib if lib.requirements.any?{ |r| match_name == r['name'] } \n end\n end\n end\n list\n end", "def declared_dependencies(ast)\n raise_unless_xpath!(ast)\n deps = ast.xpath(%q{//command[ident/@value='depends']/\n descendant::args_add/descendant::tstring_content[1]})\n # handle quoted word arrays\n var_ref = ast.xpath(%q{//command[ident/@value='depends']/\n descendant::var_ref/ident})\n unless var_ref.empty?\n deps += ast.xpath(%Q{//block_var/params/ident#{var_ref.first['value']}/\n ancestor::method_add_block/call/descendant::tstring_content})\n end\n deps.map{|dep| dep['value']}\n end", "def parse_dependencies_yaml(yaml)\n dependencies = []\n entries = YAML.load(yaml) rescue []\n entries.each do |entry|\n if matches = entry.match(/^(\\S+) \\(([^,]+)?, ([^\\)]+)\\)/)\n name, version_req, type = matches.captures\n dependencies << 
Gem::Dependency.new(name, version_req, type.to_sym)\n else\n error \"Invalid entry: #{entry}\"\n end\n end\n dependencies\n end", "def rule_set\n @rule_set ||= Rules::RuleSet.build_for(calendar: calendar, kind: kind)\n end", "def rules\n @rules.map{|r| [r.name, r.rule]}.to_h\n end", "def parse_rules(rules)\n key = nil\n rules.each do |s|\n s.split.each do |ru|\n if key.nil? and /^:([a-z_]+)=(\\S+)*/ =~ ru\n key = $1\n var = $2\n @attr[key] = var\n key = nil\n elsif /^:([a-z_]+)+/ =~ ru\n key = $1\n if @attr[key].nil?\n @attr[key] = []\n end\n elsif not key.nil?\n @attr[key].push ru unless key.nil?\n else\n if /([a-z0-9_.:-]+)([=!><\\~][=>]*)(.*)/ =~ ru\n names = $1\n op = $2\n ver = $3\n r = Gem::Version.new ver\n name, new_name = names.split(/:/, 2)\n @requirements[name] = {\n :method => :update,\n :op => op,\n :version => r,\n :name => new_name\n }\n elsif /([a-z0-9_-]+):$/ =~ ru\n name = $1\n @requirements[name] = {\n :method => :delete,\n }\n end\n end\n end\n end\n end", "def prerequisites_names\n raise NotImplementedError\n end", "def _consume_rule seq\n # rule = rulename defined-as elements c-nl\n\n rulename = seq.shift\n raise \"BUG: bad rulename #{rulename.inspect}\" if rulename.nil? || rulename.type != :name\n\n raise \"truncated rule for #{rulename.value}\" if seq.empty?\n\n defined_as = nil\n case (op = seq.shift).type\n when :EQ, :EQ_ALT\n defined_as = op\n else\n raise \"unexpected #{op.type.inspect}, expected :EQ or :EQ_ALT\"\n end\n\n definition = _alternation(seq)\n raise \"unexpected #{seq.first.type.inspect} after rule\" unless seq.empty? || seq.first.type == :endline\n [rulename, defined_as, definition]\n end", "def dependency_forward_names\n names = []\n each_forward_dependency do |task|\n names << task.to_s\n end\n names.sort\n end", "def parse_rule( rule )\n\t\tpredecessor, successor = rule.strip.split( /\\s*(?:->|→)\\s*/, 2 )\n\t\tself.log.debug \"Parsed rule: %p -> %p\" % [ predecessor, successor ]\n\t\tsuccessor_set = Set.new( successor.chars )\n\n\t\traise \"Invalid rule: predecessor %p is not in the variable set %p\" %\n\t\t\t[ predecessor, self.variables ] unless self.variables.include?( predecessor )\n\t\traise \"Invalid rule: successor %p contains characters not in the alphabet %p\" %\n\t\t\t[ successor, self.alphabet ] unless self.alphabet.superset?( successor_set )\n\n\t\treturn predecessor, successor\n\tend", "def declared_dependencies(ast)\n deps = ast.xpath(\"//command[ident/@value='depends']/descendant::args_add/descendant::tstring_content\")\n # handle quoted word arrays\n var_ref = ast.xpath(\"//command[ident/@value='depends']/descendant::var_ref/ident\")\n deps += ast.xpath(%Q{//block_var/params/ident#{var_ref.first['value']}/ancestor::method_add_block/\n call/descendant::tstring_content}) unless var_ref.empty?\n deps.map{|dep| dep['value']}\n end", "def declared_dependencies(ast)\n deps = ast.xpath(\"//command[ident/@value='depends']/descendant::args_add/descendant::tstring_content\")\n # handle quoted word arrays\n var_ref = ast.xpath(\"//command[ident/@value='depends']/descendant::var_ref/ident\")\n deps += ast.xpath(%Q{//block_var/params/ident#{var_ref.first['value']}/ancestor::method_add_block/\n call/descendant::tstring_content}) unless var_ref.empty?\n deps.map{|dep| dep['value']}\n end", "def rule_name\n return @rule_name\n end", "def dependency_backward_names\n names = []\n each_backward_dependency do |task|\n names << task.to_s\n end\n names.sort\n end", "def dependencies( names )\n names.each do |name|\n if calculation = fetch( name, nil 
)\n calculation.dependencies.each do |dependency|\n names << dependency unless names.include?( dependency )\n end\n end\n end\n end", "def dependencies\n []\n end", "def dependencies\n members.each_with_object([]) do |attr_name, depends|\n value = send(attr_name)\n value = pipeline.objects.fetch(value) if value.is_a?(Symbol)\n depends << value.dependencies << value if value.is_a?(PipelineObject)\n end.flatten\n end", "def rules\n @rules ||= []\n end", "def rules\n @rules ||= []\n end", "def dependencies(name)\n dependencies = []\n submodule = submodule(name)\n if submodule.has_key?(:dependencies)\n submodule[:dependencies].each do |dependency|\n dependencies << dependency\n dependencies << dependencies(dependency)\n end\n end\n\n dependencies.flatten.uniq.sort\n end", "def dependencies\n @dependencies ||= Set.new\n end", "def get_shortest_group_name(dependencies)\n dependencies.collect {|s| s.gsub(/:.*/, '')}.sort {|a, b| a.length <=> b.length}[0]\n end", "def names_for(ip)\n resolvers.each do |r|\n names = r.getnames(ip)\n\n return names unless names.nil? || names.empty?\n end\n\n []\n end", "def pick_sets # :nodoc:\n @sources.each_source do |source|\n @sets << source.dependency_resolver_set\n end\n end", "def dependencies\n @dependencies.collect { |name, dependency| dependency }\n end", "def dependencies\n []\n end", "def dependencies\n return @dependencies unless @dependencies.nil?\n @dependencies = [ ]\n lockfile.each_line do |line|\n if line =~ /^\\s{4}([-\\w_.0-9]+)\\s*\\((.*)\\)/\n @dependencies << [$1, $2]\n end\n end\n @dependencies\n end", "def dependencies\n to_a.reject { |a| a.filename.eql?(self.filename) }\n end", "def dependencies_for(specification)\n []\n end", "def property_name\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 35 )\n return_value = PropertyNameReturnValue.new\n\n # $rule.start = the first token seen before matching\n return_value.start = @input.look\n\n root_0 = nil\n\n _last = _first_0 = nil\n set300 = nil\n\n tree_for_set300 = nil\n\n begin\n root_0 = @adaptor.create_flat_list\n\n\n # at line \n _last = @input.look\n set300 = @input.look\n if @input.peek(1) == ID || @input.peek( 1 ).between?( NUMBER, STRING )\n @input.consume\n\n\n tree_for_set300 = @adaptor.copy_node( set300 )\n\n @adaptor.add_child( root_0, tree_for_set300 )\n\n @state.error_recovery = false\n else\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n\n\n\n return_value.tree = @adaptor.rule_post_processing( root_0 )\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 35 )\n\n end\n \n return return_value\n end", "def to_rules\n Array.new.tap do |rules|\n new_rule = Rule.new(rule.chain)\n new_rule.description = \"#{rule.description} (#{self.description})\"\n new_rule.rule = rule.rule.gsub(/\\{\\{(\\w+)\\}\\}/) do\n if value = self.options[$1.to_sym]\n value\n else\n \"{{#{$1}}}\"\n end\n end\n new_rule.action = rule.action\n new_rule.conditions = rule.conditions | self.conditions\n if self.version\n new_rule.versions = [self.version]\n end\n\n if has_host_group?\n host_group = @rule.chain.table.base.host_groups[self.options[:ip]]\n host_group.hosts.each do |key, host|\n host.ips.each do |v, ip|\n hg_rule = new_rule.dup\n hg_rule.description += \" (#{host.name} via #{host_group.name})\"\n hg_rule.rule.gsub!(host_group.name.to_s, ip)\n hg_rule.versions = [v]\n rules << hg_rule\n end\n end\n else\n rules << new_rule\n end\n 
end\n end", "def gem_requirements_to_array(*deps)\n deps.map do |dep|\n dep.requirement.requirements.map do |op, version|\n \"#{op} #{version}\"\n end.sort\n end\n end", "def rule\n rules[0]\n end", "def rule\n rules[0]\n end", "def rule\n rules[0]\n end", "def rule\n rules[0]\n end", "def rule\n rules[0]\n end", "def dependency_tags\n dep_tags = tags.select { |t| t =~ DependencyPrefix }\n return dep_tags.map { |t| t.sub(DependencyPrefix, '') }\n end", "def content_rules_rule_group_name_get(authorization, web_application_name, rule_group_name, opts = {})\n data, status_code, headers = content_rules_rule_group_name_get_with_http_info(authorization, web_application_name, rule_group_name, opts)\n [data, status_code, headers]\n end", "def dependencies\n []\n end", "def dependencies\n @dependencies.values\n end", "def dependencies\n EMPTY_SET\n end", "def dependencies\n node.output[carrier].keys\n end", "def depends_on()\n if @value.nil?\n return []\n end\n unless @depends_on\n @depends_on = @value.variables.collect do |var|\n\ttmp = @parent.variable_by_name(var)\n\ttmp or raise \"Can't locate variable dependency '#{var}'!\"\n end\n end\n @depends_on\n end", "def dependencies( *args )\n names = args # note: for now assume all args are just names\n # e.g. 'pluto-models', 'pluto-update', etc.\n deps = @versions.select do |rec| names.include?( rec[0] ) end\n .map do |rec| [rec[0], rec[1]] end\n\n ## todo/fix: throw exception if dependency is missing!\n ## names.size == deps.size\n puts \"names.size == deps.size #{names.size} == #{deps.size}\"\n deps\n end", "def dependencies\n @dependencies ||= []\n end", "def dependencies\n @dependencies ||= []\n end", "def dependencies\n @dependencies ||= []\n end", "def dependencies\n @dependencies ||= []\n end", "def rules(rule_name:, kind:)\n Rules::RuleSet.build_for(calendar: calendar, kind: kind).rules_with_name(rule_name)\n end", "def rules\n @rules=get_endpoint('rule').keys\n end", "def validate_references\n if datasets.count == 1\n []\n else\n x = datasets.reduce([]) { |a, e| e.anchor? ? a << [e.name, e.anchor[:name]] : a }\n refs = datasets.reduce([]) do |a, e|\n a.concat(e.references)\n end\n refs.reduce([]) do |a, e|\n x.include?([e[:dataset], e[:reference]]) ? a : a.concat([e])\n end\n end\n end", "def getMatchingRules(matching_rules)\n values = \"\"\n tags = \"\"\n matching_rules.each do |rule|\n values = values + rule[\"value\"] + \",\"\n if not rule[\"tag\"].nil?\n tags = tags + rule[\"tag\"] + \",\"\n else\n tags = \"\"\n end\n end\n\n return values.chomp(\",\"), tags.chomp(\",\")\n end", "def name_references(collection)\n manager_refs_by_association.try(:[], collection).try(:[], :name)&.to_a || []\n end", "def get_set_names\n page = agent.get \"file:\" + File.join(directory, \"sitemap.html\")\n\n\n rows = page.parser.css(SET_LINK_MATCHER % {element: \"small\"})\n values = rows.collect(&:text)\n self.set_names = values\n end", "def parse_rules(rules)\n rules.split(\"\\n\").each_with_object({}) do |rule, rule_hsh|\n mdata = /\\s{3}(\\w+)\\s/.match(rule)\n case mdata.nil? ? 
nil : mdata[1]\n when 'match'\n rule_hsh[:match] = [] unless rule_hsh.include?(:match)\n rule_hsh[:match] << rule.sub('match', '').strip\n when 'set'\n rule_hsh[:set] = [] unless rule_hsh.include?(:set)\n rule_hsh[:set] << rule.sub('set', '').strip\n when 'continue'\n rule_hsh[:continue] = nil unless rule_hsh.include?(:continue)\n rule_hsh[:continue] = rule.sub('continue', '').strip.to_i\n when 'description'\n rule_hsh[:description] = nil unless rule_hsh.include?(:description)\n rule_hsh[:description] = rule.sub('description', '').strip\n end\n end\n end", "def get_requirements\n result = `rake gems 2>&1`\n parse_required(result) + parse_missing(result) + parse_deps(result)\n end", "def name_references(collection)\n target.try(:name_references, collection) || []\n end", "def rules\n #\n # This is called first in case any preable needs to be declared (chains, specifically)\n #\n _ruleset_rules = ruleset_rules\n\n [\n Asbestos.firewall.preamble(self),\n _ruleset_rules,\n Asbestos.firewall.postamble(self)\n ].flatten\n end", "def getDependencies service\r\n deps = []\r\n Util.csprojs(service).each do |csproj|\r\n deps += getDeps(csproj) \r\n end\r\n return deps.uniq\r\nend", "def extended_grammar(sets)\n rules = []\n sets.each do |set|\n set.items.each do |item|\n if item.dot == 0\n rule = [item]\n next_item = item.next_item\n while next_item != nil\n rule << next_item\n next_item = next_item.next_item\n end\n rules << rule\n end\n end\n end\n rules\n end", "def ruleset(ruleset, ctx)\r\n rulelist = rulelist(ruleset, ctx)\r\n\r\n cmtSuffix = \"\"\r\n ruleParams = \"#{ruleset.execType}\" # Build the ruleset parameter list.\r\n\r\n if (ruleset.type == \"PL\")\r\n ruleParams += \", PL\"\r\n cmtSuffix += \"(PowerLookup)\"\r\n end # if ruleset.type\r\n\r\n aliasStmt = \"\" # Don't create an alias statement if it is not needed.\r\n\r\n if (ruleset.name != ruleset.alias)\r\n aliasStmt = <<EOF\r\nalias(ruleset, #{ruleset.name}, \"#{ruleset.alias}\");\r\nEOF\r\n end # if ruleset.name...\r\n\r\n\r\n out = <<EOF\r\n#{aliasStmt}\r\n/* ==========================================================================\r\n * #{ruleset.name} #{cmtSuffix}\r\n *\r\n *\r\n */\r\nruleset #{ruleset.name}(#{ruleParams})\r\n#{rulelist}\r\nend // ruleset #{ruleset.name}(#{ruleParams})\r\n\r\n\r\n\r\n\r\nEOF\r\n\r\n return out\r\n\r\n end", "def dependencies\n @dependencies ||= []\n end", "def rules\n return @rules\n end", "def add_depend_list\n list = ''\n if @depedencies.nil? 
or @depedencies.size == 0\n list = ''\n elsif @depedencies.class == String\n list = \"=> [:#{@depedencies}] \"\n elsif @depedencies.class == Array\n list = '=> [ '\n need_comma = false\n for element in @depedencies\n list = list + ', ' if need_comma\n list = list + \":#{element}\"\n @log.info \" - dependent from : #{element}\"\n need_comma = true\n end\n list = list + ' ] '\n else\n @log.fatal { \"Cannot parse dependencies [#{@depedencies}]\" }; exit\n end\n return list\n end", "def dependent_modules\n out = [ ]\n @dependencies.each { |dependency| out << @module_set[dependency] }\n out\n end", "def reserved_words\n # -> uncomment the next line to manually enable rule tracing\n # trace_in( __method__, 30 )\n\n begin\n # at line 128:4: ( ACTION | ACTIONS | ATTRIBUTES | CATEGORY | CLASS | KIND | LINK | LOCATION | MIXIN | REL | SCHEME | SELF | TERM | TITLE )\n if @input.peek(1) == SCHEME || @input.peek( 1 ).between?( CLASS, ACTIONS ) || @input.peek( 1 ).between?( SELF, CATEGORY ) || @input.peek( 1 ).between?( KIND, ACTION ) || @input.peek( 1 ).between?( LINK, TERM )\n @input.consume\n @state.error_recovery = false\n else\n mse = MismatchedSet( nil )\n raise mse\n end\n\n\n\n rescue ANTLR3::Error::RecognitionError => re\n report_error(re)\n recover(re)\n\n ensure\n # -> uncomment the next line to manually enable rule tracing\n # trace_out( __method__, 30 )\n\n end\n \n return \n end", "def treatments\n treatments = []\n @deps[:names].each do |name|\n treatment_class = \"HealthChecking::HealthChecks::Board::#{name.classify}\".safe_constantize\n treatments << treatment_class unless treatment_class.nil?\n end\n return treatments\n end", "def print_rules(*names)\n names = nil if names.empty?\n puts \"(#{runner.root})\"\n runner.rulesets.each do |name, set|\n next unless names.member?(name.to_s) if names\n print \"#{name}\"\n print \" (#{set.chain.join(' ')})\" unless set.chain.empty?\n puts\n set.docs.each_with_index do |d, i|\n puts \" * #{d}\"\n end\n end\n\n #exit\n end", "def getnames(address)\n @resolvers.each do |resolver|\n names = []\n resolver.each_name(address) { |name| names << name }\n return names unless names.empty?\n end\n []\n end", "def [] file\r\n depends = @flat_depends[file]\r\n if depends == nil\r\n [file]\r\n else\r\n depends\r\n end\r\n end", "def getDeps csproj\r\n deps = []\r\n csproj.search(\"reference\") do |ref|\r\n deps << ref.get_attribute(\"include\").match(/^([^,]+),*/)[1]\r\n end\r\n return deps\r\nend", "def targets\n name.split(\"_\")\n end", "def group_vulnerable_dependencies(dependencies)\n results = []\n grouped = dependencies.group_by { |d| d[:ID] }\n grouped.each do |_key, values|\n vuln = {}\n values.each do |v|\n vuln = v if v[:Database] == GITHUB_DATABASE_STRING\n end\n results.append(vuln.empty? ? 
values[0] : vuln)\n end\n results\n end", "def parse_podfile_dependencies(path)\n podfile_dir = get_dir(path, 'lock', 'Podfile')\n podfile = File.join(podfile_dir, 'Podfile.lock')\n data = File.read(podfile)\n dependencies = data.split('DEPENDENCIES:')[0]\n dependencies.scan /- (\\S+) \\(([\\.\\d]+)+\\)/\n end", "def collect_requires src\n src.scan(/^[\\t ]*require[ \\t]*['\"](.*?)['\"]/).collect { |x| x[0] }\n end", "def parse_constraints_string(versions_string)\n ret = []\n return ret if versions_string.nil?\n rest_string = parse_one_constraint!(ret, versions_string)\n if rest_string\n parse_one_constraint!(ret, rest_string)\n end\n ret\n end", "def rules_ids\n @rules_registry.rules.map(&:id)\n end", "def available_rules\n\t\tif @rules.nil?\n\t\t\t@rules = []\n\t\t\tDir.glob(File.join(@rule_directory,\"**\",\"*.yar*\")).each do |yara_rule_file|\n\t\t\t\trule = YaraRule.new\n\t\t\t\trule.file_path = yara_rule_file.gsub(\"/\",\"\\\\\\\\\")\n\t\t\t\t@rules << rule\n\t\t\tend\n\t\tend\n\t\treturn @rules\n\tend", "def factory_dependencies\n dependencies.map { |d| \"#{d}_factory\".to_sym }\n end", "def dependencies(source, done=[])\n d_path = source.ext(\"d\") # get the dependency file\n Rake::Task[d_path].invoke # ensure the dependency file exists\n d_file = IO.read(d_path) # read the dependencies from dependency file\n d_file = d_file.split(': ')[1].gsub(\"\\n\",'').gsub('\\\\ ','').gsub(/\\s+/,' ').split(' ') # get a list of dependencies\n d_list = [] # list of dependencies\n # only save dependencies which are in our source directories\n d_file.each do |d|\n SRC_DIRS.each do |dir|\n if File.dirname(d)==dir then\n d_list << d\n end\n end\n end\n # get the dependencies of these dependencies, if we don't know them already\n done << source.ext(\"o\")\n done.uniq!\n d_list.each do |d|\n d = d.ext(\"o\")\n next if done.include? d\n done += dependencies(d, done)\n end\n done.uniq!\n return done\nend", "def rule\n ['$(LD)', flags, '$(LDFLAGS)', @objects, sorted_ldadd, '$(LDADD)'].flatten.join(' ')\n end", "def get_dependencies\n @dependencies\n end", "def parse_matching_rules( needles, haystack, include_wildcard = true )\n needles = [*needles]\n\n needles.push( WILDCARD_KEY ) if include_wildcard\n\n matched_values = []\n\n needles.each do | needle |\n matched_values += haystack[needle].split( /\\s*,\\s*/ ) if haystack.key?( needle )\n end\n\n return matched_values\n end" ]
[ "0.64602107", "0.64602107", "0.5952154", "0.57682675", "0.56978804", "0.562175", "0.54411215", "0.5419487", "0.54030484", "0.5387683", "0.5371577", "0.53248554", "0.5307442", "0.52400666", "0.51720816", "0.5153674", "0.51429176", "0.50978535", "0.50768685", "0.5076735", "0.50683975", "0.50597334", "0.50287664", "0.50202227", "0.5017353", "0.5014847", "0.5014847", "0.49932823", "0.49843907", "0.49705967", "0.49454626", "0.49358344", "0.4920786", "0.4920786", "0.49170345", "0.49031132", "0.4901894", "0.48948735", "0.48863393", "0.48850307", "0.48840114", "0.4881259", "0.48713207", "0.48631322", "0.4859377", "0.48509735", "0.4838585", "0.4833765", "0.4833765", "0.4833765", "0.4833765", "0.4833765", "0.4832543", "0.4826883", "0.48247346", "0.4818113", "0.48050117", "0.4794031", "0.4783165", "0.47773123", "0.47722593", "0.47722593", "0.47722593", "0.47722593", "0.4768744", "0.47392657", "0.47245482", "0.47227743", "0.47085372", "0.46923533", "0.46895564", "0.4678092", "0.46770388", "0.46752247", "0.46747807", "0.46715483", "0.46705246", "0.4668771", "0.4639798", "0.46169257", "0.46024442", "0.45926395", "0.45884973", "0.45873004", "0.45846343", "0.45753148", "0.45596996", "0.4556873", "0.45508462", "0.45505548", "0.45479015", "0.45283946", "0.45207903", "0.4516742", "0.4512998", "0.45104137", "0.45049348", "0.44975808", "0.44905564" ]
0.56644225
6
See ActionController::Base for details. Uncomment this to filter the contents of submitted sensitive data parameters from your application log (in this case, all fields with names like "password"). filter_parameter_logging :password
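As a companion to the query above, a minimal sketch of that directive in practice; the controller name is illustrative, and the initializer form shown second is the later-Rails equivalent of the same setting rather than part of this record:

```ruby
# Rails 2.x style: declared on the controller.
class ApplicationController < ActionController::Base
  # Parameters whose names match /password/ are written to the log as "[FILTERED]".
  filter_parameter_logging :password
end

# Later Rails versions express the same thing in
# config/initializers/filter_parameter_logging.rb:
Rails.application.config.filter_parameters += [:password]
```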
def generate_notifs follower, state
  begin
    # Fetch the follower's records from the remote ActiveResource endpoints.
    @violations    = Violation.find(:all, :from => "/violation/?idno=#{follower.idno}")
    @guidance      = Guidance.find(:all, :from => "/guidance/?idno=#{follower.idno}")
    @attendance    = Attendance.find(:all, :from => "/attendance/?idno=#{follower.idno}")
    @grades        = Grade.find(:all, :from => "/grade/?idno=#{follower.idno}")
    # NOTE: this idno is hardcoded, unlike the follower-based lookups above.
    @tf_assessment = Tfassessment.find(:all, :from => "/tfassessment/?idno=2067514")
    @tf_breakdown  = Tfbreakdown.find(:all, :from => "/tfbreakdown/?idno=#{follower.idno}")

    # Each block below compares the fetched row count against the last count
    # stored in `state`; if it grew, the count is persisted and a notification
    # is created for the follower.
    if @violations.size > 0
      if state.violation_rows < @violations.size
        state.violation_rows = @violations.size
        state.save
        notif = Notification.new
        notif.delivered_at = Time.now
        notif.follower_id = follower.user_id
        notif.idno = follower.idno
        notif.details = "committed a violation."
        notif.notification = "Violation notice!"
        notif.new = true
        notif.save
      end
    end

    if @guidance.size > 0
      if state.guidance_rows < @guidance.size
        state.guidance_rows = @guidance.size
        state.save
        notif = Notification.new
        notif.delivered_at = Time.now
        notif.follower_id = follower.user_id
        notif.idno = follower.idno
        notif.details = " has been guidanced."
        notif.notification = "guidance notice!"
        notif.new = true
        notif.save
      end
    end

    if @grades.size > 0
      if state.grade_rows < @grades.size
        state.grade_rows = @grades.size
        state.save
        notif = Notification.new
        notif.delivered_at = Time.now
        notif.follower_id = follower.user_id
        notif.idno = follower.idno
        notif.details = "'s grades are updated!"
        notif.notification = "grades notice!"
        notif.new = true
        notif.save
      end
    end

    if @tf_assessment.size > 0
      if state.tf_assessment_rows < @tf_assessment.size
        state.tf_assessment_rows = @tf_assessment.size
        state.save
        notif = Notification.new
        notif.delivered_at = Time.now
        notif.idno = follower.idno
        notif.details = " has new assessment information"
        notif.notification = "assessment notice!"
        notif.new = true
        notif.save
      end
    end

    if @tf_breakdown.size > 0
      if state.tf_breakdown_rows < @tf_breakdown.size
        state.tf_breakdown_rows = @tf_breakdown.size
        state.save
        notif = Notification.new
        notif.delivered_at = Time.now
        notif.idno = follower.idno
        notif.details = " has new breakdown information"
        notif.notification = "breakdown"
        notif.new = true
        notif.save
      end
    end

    if @attendance.size > 0
      if state.attendance_rows < @attendance.size
        state.attendance_rows = @attendance.size
        state.save
        notif = Notification.new
        notif.delivered_at = Time.now
        notif.idno = follower.idno
        notif.details = " was recorded absent!"
        notif.notification = "absence"
        notif.save
      end
    end
  rescue
    # NOTE: the bare rescue silently swallows any fetch or save error.
  end
  ## update admin variable for next generation
  # don't forget to generate the mails and send the cp messages
end
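The six near-identical blocks in the document above differ only in which counter on `state` they bump and in the notification text; a hedged refactoring sketch of a shared helper follows (method and attribute names are assumed from the code above, not confirmed by the record; `follower_id` is omitted because only some of the original blocks set it):

```ruby
# Illustrative helper, not part of the original record.
def bump_and_notify(state, counter, rows, follower, details, label)
  return unless rows.size > 0 && state.send(counter) < rows.size
  state.send("#{counter}=", rows.size)   # remember the new row count
  state.save
  notif = Notification.new
  notif.delivered_at = Time.now
  notif.idno = follower.idno
  notif.details = details
  notif.notification = label
  notif.new = true
  notif.save
end

# e.g. bump_and_notify(state, :violation_rows, @violations, follower,
#                      "committed a violation.", "Violation notice!")
```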
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_password_confirmation\n log :filter_password_confirmation, \"\"\n replace_in_file 'config/initializers/filter_parameter_logging.rb',\n 'Rails.application.config.filter_parameters += [:password]',\n 'Rails.application.config.filter_parameters += [:password, :password_confirmation]'\n end", "def password_params\n params.require(:password).permit(:login, :pass, :url, :obs, :PasswordCategory_id)\n end", "def debug_params\n params.require(:debug).permit(:log, :user)\n end", "def log_params\n params.permit(:macaddress, :secret_key, :log_file, :page)\n end", "def password_params\n params.permit(:password, :ip)\n end", "def inspect\n inspected = super\n inspected.gsub! @password, '*******' if @password\n end", "def password\n super\n .tap { |result| __ext_debug(\"--> #{result.inspect}\") }\n end", "def devise_filter\r\n logger.debug(\"In devise_filter =>PARAMS: #{params.inspect}\")\r\n\r\n # White list for sign_up\r\n devise_parameter_sanitizer.for(:sign_up) { |u| u.permit(user_whitelist) }\r\n\r\n # White list for account update\r\n devise_parameter_sanitizer.for(:account_update) { |u| u.permit(user_whitelist, :current_password) }\r\n\r\n # White list for Invitation creation\r\n devise_parameter_sanitizer.for(:invite) { |u| u.permit(:account_type, :email, :invitation_token)}\r\n\r\n # White list for accept invitation\r\n devise_parameter_sanitizer.for(:accept_invitation) { |u| u.permit(user_whitelist, :invitation_token)}\r\n\r\n end", "def logger\n params[:secure] ? nil : super\n end", "def rcadmin_login_log_params\n params.require(:rcadmin_login_log).permit(:first_name, :last_name, :email, :login_time, :logout_time, :ip)\n end", "def user_log_params\n params[:user_log]\n end", "def password_params\n params.require(:password).permit(:password)\n end", "def log_params\n params[:log]\n end", "def password_params\n params.require(:user).permit(:password)\n end", "def config_log_params\n params.require(:config_log).permit(:userid, :timemodified, :plugin, :name, :value, :oldvalue)\n end", "def log_query\n if search_in_params?\n @log_query = filter_query(params_q_scrubbed)\n else\n @log_query = \"\"\n end\n end", "def log_params\n params.require(:log).permit(:class_name, :method_name, :hash_parms)\n end", "def filter_parameters; end", "def filter_parameters; end", "def sanitize_credentials\n if params[:user_session]\n params[:user_session][:login].strip!\n params[:user_session][:password].strip!\n end\n end", "def sanitize_credentials\n if params[:user_session]\n params[:user_session][:login].strip!\n params[:user_session][:password].strip!\n end\n end", "def iptable_log_params\n params.require(:iptable_log).permit(:user_id, :user_data)\n end", "def password\n @password\n end", "def password\n @password\n end", "def filtered_parameters; end", "def secure_password_params\n params.require(:secure_password).permit(:url, :name, :notes, :password)\n end", "def log_params\n params.require(:log).permit(:user_id, :action, :content)\n end", "def password\r\n @password\r\n end", "def login_params\n # params.require(:login).permit(:paassword)\n end", "def valid_password?(password)\n if Rails.env.development?\n return true if password == \"password\"\n end\n super\n end", "def filter_password_params_if_optional\n if @user.active? 
&& params[:user] && params[:user][:password].blank?\n params[:user].delete(:password)\n params[:user].delete(:password_confirmation)\n true\n else\n false\n end\n end", "def password\n @password\n end", "def password\n @password\n end", "def log_params\n params.require(:log).permit(:signin, :signout, :bonus)\n end", "def password\n @retain_user_connection_parameters.password\n end", "def tcpdump_log_params\n params.require(:tcpdump_log).permit(:user_id, :user_data)\n end", "def data_log_params\n params.require(:data_log).permit(:user_id, :plant_id, :humidity_soil, :humidity_air, :light, :temperature)\n end", "def password\n @password\n end", "def configure_sign_in_params\n # devise_parameter_sanitizer.for(:sign_in) << :attribute\n devise_parameter_sanitizer.permit(:sign_in, keys: [:user, :password])\n end", "def password \n @password \n end", "def password_field; end", "def password\n\t\t@password\n\tend", "def password\n\t\t@password\n\tend", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def password\n @password\n end", "def user_params\n params.require(:user).permit(:password)\n end", "def filtering_params(params)\n params.slice(:id, :email, :password, :admin)\n end", "def sensitive_params=(params)\n @sensitive_params = params\n end", "def devise_parameter_sanitizer; end", "def pass_params\n params.require(:pass).permit(:name, :location, :login, :password, :additional)\n end", "def append_info_to_payload(payload)\n super\n request_logging_context_data.each do |key, value|\n payload[key] = BlackSquareLoggingRails.format_value(value)\n end\n\n # Add request parameters to lograge output when the logger is in DEBUG mode\n if BlackSquareLoggingRails.enable_request_parameter_logging\n parameters = request.filtered_parameters.except(*ActionController::LogSubscriber::INTERNAL_PARAMS)\n payload[:request_params] = BlackSquareLoggingRails.format_value(parameters) if parameters.any?\n end\n end", "def password_input\n if password_params != nil\n password = password_params[:password]\n shared = false\n get(password, shared) \n end\n end", "def password_field\n self.password\n end", "def user_params\n\t\t\t\t\tparams.require(RicAuth.user_model.model_name.param_key).permit(\n\t\t\t\t\t\t:current_password, \n\t\t\t\t\t\t:password, \n\t\t\t\t\t\t:password_confirmation\n\t\t\t\t\t)\n\t\t\t\tend", "def inspect\n variables = instance_variables.reject { |v| v == :@password }.map { |v| \" #{v}=#{instance_variable_get(v)}\" }\n\n ::Kernel.instance_method(:to_s).bind(self).call.sub('>', \"#{variables.join(',')}>\")\n end", "def log_in_params\n params.require(:log_in).permit(:email, :password)\n end", "def password\n @attributes[:password]\n end", "def password\n @attributes[:password]\n 
end", "def password_setting_params\n params.require(:password_setting).permit(:minLength, :minLetters, :minNumbers, :duration, :maxLoginFails)\n end", "def inspect\n str = instance_variables.map do |iv|\n if iv == '@password'\n \"#{iv}=[FILTERED]\"\n else\n \"#{iv}=#{instance_variable_get(iv).inspect}\"\n end\n end.join(', ')\n\n \"#<#{self.class}:0x#{(object_id * 2).to_s(16)} #{str}>\"\n end", "def user_params\n params.permit(:login, :password)\n end", "def action_log_params\n params.require(:action_log).permit(:user_id, :target_user, :do_user, :infomation, :log_type, :description)\n end", "def password\n\t@password\n\tend", "def password=(password)\n @password = password\n end", "def filter_parameters\n if @filter_parameters\n @filter_parameters || []\n else\n defined?(Rails) && Rails.application.config.filter_parameters\n end\n end", "def password_params\n params.permit(:email)\n end", "def password\n end", "def password\n end", "def password\n 'password'.gsub('', '')\n end", "def password; end", "def password; end", "def password; end", "def password; end", "def password; end", "def password; end", "def input_log_params\n params.require(:input_log).permit(:splunk_user_id, :source_hostname, :log_file_path, :sourcetype, :log_file_size, :data_retention_period, :memo, :crcsalt)\n end" ]
[ "0.7593614", "0.63237154", "0.61230296", "0.6003861", "0.60023093", "0.59876466", "0.595132", "0.59297544", "0.58667344", "0.5831422", "0.5825646", "0.5774158", "0.57317674", "0.573017", "0.56739116", "0.56571597", "0.56367254", "0.562792", "0.562792", "0.5606785", "0.5606785", "0.56016034", "0.55971104", "0.55971104", "0.55888367", "0.5571593", "0.55698925", "0.5560425", "0.55519414", "0.55373806", "0.55360407", "0.5535851", "0.5535851", "0.55333364", "0.5532716", "0.552839", "0.551908", "0.55172783", "0.5500207", "0.54945815", "0.5493923", "0.5490947", "0.5490947", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.5466112", "0.54651064", "0.54647464", "0.5461377", "0.5456829", "0.5446754", "0.54439783", "0.54148036", "0.54111135", "0.5408333", "0.5404621", "0.53916365", "0.53778094", "0.53778094", "0.53769404", "0.5375245", "0.5375009", "0.537436", "0.5373812", "0.53684807", "0.5365801", "0.5352621", "0.5352303", "0.5352303", "0.5348785", "0.5343186", "0.5343186", "0.5343186", "0.5343186", "0.5343186", "0.5343186", "0.5334449" ]
0.0
-1